Merge "Ensure sbin is in PATH."
diff --git a/.gitignore b/.gitignore
index b80b476..b0a65f5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@
 *.pem
 .localrc.auto
 .prereqs
+.tox
 .stackenv
 accrc
 docs/files
diff --git a/.mailmap b/.mailmap
index 29be995..43e4e6e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -4,3 +4,4 @@
 Jiajun Liu <jiajun@unitedstack.com> <iamljj@gmail.com>
 Jian Wen <jian.wen@canonical.com> <wenjianhn@gmail.com>
 Joe Gordon <joe.gordon0@gmail.com> <jogo@cloudscaling.com>
+Sean Dague <sean.dague@samsung.com> <sdague@linux.vnet.ibm.com> <sean@dague.net>
diff --git a/AUTHORS b/AUTHORS
deleted file mode 100644
index 04bff48..0000000
--- a/AUTHORS
+++ /dev/null
@@ -1,51 +0,0 @@
-Aaron Lee <aaron.lee@rackspace.com>
-Aaron Rosen <arosen@nicira.com>
-Adam Gandelman <adamg@canonical.com>
-Akihiro MOTOKI <motoki@da.jp.nec.com>
-Andrew Laski <andrew.laski@rackspace.com>
-Andy Smith <github@anarkystic.com>
-Anthony Young <sleepsonthefloor@gmail.com>
-Armando Migliaccio <armando.migliaccio@eu.citrix.com>
-Brad Hall <brad@nicira.com>
-Chmouel Boudjnah <chmouel@chmouel.com>
-Dan Prince <dprince@redhat.com>
-Dean Troyer <dtroyer@gmail.com>
-Devin Carlen <devin.carlen@gmail.com>
-Doug hellmann <doug.hellmann@dreamhost.com>
-Eddie Hebert <edhebert@gmail.com>
-Edgar Magana <emagana@gmail.com>
-Eoghan Glynn <eglynn@redhat.com>
-Eric Windisch <ewindisch@cloudscaling.com>
-Gabriel Hurley <gabriel@strikeawe.com>
-Gary Kotton <gkotton@redhat.com>
-Hengqing Hu <hudayou@hotmail.com>
-Hua ZHANG <zhuadl@cn.ibm.com>
-Isaku Yamahata <yamahata@private.email.ne.jp>
-Jake Dahn <admin@jakedahn.com>
-James E. Blair <james.blair@rackspace.com>
-Jason Cannavale <jason.cannavale@rackspace.com>
-Jay Pipes <jaypipes@gmail.com>
-Jesse Andrews <anotherjesse@gmail.com>
-Jian Wen <jian.wen@canonical.com>
-Joe Gordon <jogo@cloudscaling.com>
-Johannes Erdfelt <johannes.erdfelt@rackspace.com>
-John Postlethwait <john.postlethwait@nebula.com>
-Josh Kearney <josh@jk0.org>
-Justin Shepherd <galstrom21@gmail.com>
-Ken Pepple <ken.pepple@rabbityard.com>
-Kiall Mac Innes <kiall@managedit.ie>
-Matt Joyce <matt.joyce@cloudscaling.com>
-Osamu Habuka <xiu.yushen@gmail.com>
-Russell Bryant <rbryant@redhat.com>
-Scott Moser <smoser@ubuntu.com>
-Sean Dague <sdague@linux.vnet.ibm.com>
-Sumit Naiksatam <sumitnaiksatam@gmail.com>
-Thierry Carrez <thierry@openstack.org>
-Todd Willey <xtoddx@gmail.com>
-Tres Henry <tres@treshenry.net>
-Vincent Untz <vuntz@suse.com>
-Vishvananda Ishaya <vishvananda@gmail.com>
-Yun Mao <yunmao@gmail.com>
-Yong Sheng Gong <gongysh@cn.ibm.com>
-Zhongyue Luo <lzyeval@gmail.com>
-Zhenguo Niu <niu.zglinux@gmail.com>
diff --git a/HACKING.rst b/HACKING.rst
index 83455e3..d69bb49 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -17,7 +17,7 @@
 
 Contributing code to DevStack follows the usual OpenStack process as described
 in `How To Contribute`__ in the OpenStack wiki.  `DevStack's LaunchPad project`__
-contains the usual links for blueprints, bugs, tec.
+contains the usual links for blueprints, bugs, etc.
 
 __ contribute_
 .. _contribute: http://wiki.openstack.org/HowToContribute
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
new file mode 100644
index 0000000..d754c08
--- /dev/null
+++ b/MAINTAINERS.rst
@@ -0,0 +1,98 @@
+MAINTAINERS
+===========
+
+
+Overview
+--------
+
+The following is a list of people known to have interests in
+particular areas or sub-systems of DevStack.
+
+It is a general guide intended to help seed the initial reviewer
+list of a change.  A +1 on a review from someone identified as a
+maintainer of the affected area is a strong positive signal to the
+core team about the validity of the change.
+
+The ``devstack-core`` group can still be added to all reviews.
+
+
+Format
+~~~~~~
+
+The format of the file is the name of the maintainer and their
+gerrit-registered email.
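+
+For example (a hypothetical entry)::
+
+  * Jane Doe <jane.doe@example.com>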
+
+
+Maintainers
+-----------
+
+.. contents:: :local:
+
+
+Ceph
+~~~~
+
+* Sebastien Han <sebastien.han@enovance.com>
+
+Cinder
+~~~~~~
+
+Fedora/CentOS/RHEL
+~~~~~~~~~~~~~~~~~~
+
+* Ian Wienand <iwienand@redhat.com>
+
+Neutron
+~~~~~~~
+
+OpenDaylight
+~~~~~~~~~~~~
+
+* Kyle Mestery <kmestery@cisco.com>
+
+OpenFlow Agent (ofagent)
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+* YAMAMOTO Takashi <yamamoto@valinux.co.jp>
+* Fumihiko Kakuma <kakuma@valinux.co.jp>
+
+Ryu
+~~~
+
+* YAMAMOTO Takashi <yamamoto@valinux.co.jp>
+* Fumihiko Kakuma <kakuma@valinux.co.jp>
+
+Sahara
+~~~~~~
+
+* Sergey Lukjanov <slukjanov@mirantis.com>
+
+Swift
+~~~~~
+
+* Chmouel Boudjnah <chmouel@enovance.com>
+
+SUSE
+~~~~
+
+* Ralf Haferkamp <rhafer@suse.de>
+* Vincent Untz <vuntz@suse.com>
+
+Tempest
+~~~~~~~
+
+Trove
+~~~~~
+
+* Nikhil Manchanda <SlickNik@gmail.com>
+* Michael Basnight <mbasnight@gmail.com>
+
+Xen
+~~~
+* Bob Ball <bob.ball@citrix.com>
+
+Zaqar (Marconi)
+~~~~~~~~~~~~~~~
+
+* Flavio Percoco <flaper87@gmail.com>
+* Malini Kamalambal <malini.kamalambal@rackspace.com>
diff --git a/clean.sh b/clean.sh
index e2374e7..db1a1e4 100755
--- a/clean.sh
+++ b/clean.sh
@@ -84,7 +84,6 @@
 fi
 
 # Clean projects
-cleanup_oslo
 cleanup_cinder
 cleanup_glance
 cleanup_keystone
diff --git a/docs/source/assets/images/quickstart.png b/docs/source/assets/images/quickstart.png
index 5f01bac..5400a6f 100644
--- a/docs/source/assets/images/quickstart.png
+++ b/docs/source/assets/images/quickstart.png
Binary files differ
diff --git a/docs/source/configuration.html b/docs/source/configuration.html
index c26aee4..fbcead7 100644
--- a/docs/source/configuration.html
+++ b/docs/source/configuration.html
@@ -58,7 +58,7 @@
           <h3>local.conf</h3>
           <p>The new configuration file is <code>local.conf</code> and resides in the root DevStack directory like the old <code>localrc</code> file.  It is a modified INI format file that introduces a meta-section header to carry additional information regarding the configuration files to be changed.</p>
 
-          <p>The new header is similar to a normal INI section header but with two '[[ ]]' chars and two internal fields separated by a pipe ('|'):</p>
+          <p>The new header is similar to a normal INI section header but with double brackets (<code>[[ ... ]]</code>) and two internal fields separated by a pipe (<code>|</code>):</p>
 <pre>[[ &lt;phase&gt; | &lt;config-file-name&gt; ]]
 </pre>
 
@@ -67,6 +67,8 @@
           <p>The defined phases are:</p>
           <ul>
             <li><strong>local</strong> - extracts <code>localrc</code> from <code>local.conf</code> before <code>stackrc</code> is sourced</li>
+            <li><strong>pre-install</strong> - runs after the system packages are installed but before any of the source repositories are installed</li>
+            <li><strong>install</strong> - runs immediately after the repo installations are complete</li>
             <li><strong>post-config</strong> - runs after the layer 2 services are configured and before they are started</li>
             <li><strong>extra</strong> - runs after services are started and before any files in <code>extra.d</code> are executed
           </ul>
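+          <p>For example, a single <code>local.conf</code> can carry settings for several phases (an illustrative fragment; the option values are placeholders):</p>
+<pre>[[local|localrc]]
+ADMIN_PASSWORD=secret
+
+[[post-config|$NOVA_CONF]]
+[DEFAULT]
+use_syslog = True
+</pre>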
@@ -96,7 +98,7 @@
 <pre>[[post-config|/$Q_PLUGIN_CONF_FILE]]
 </pre>
 
-          <p>The existing ``EXTRAS_OPTS`` and similar variables are now deprecated.  If used a warning will be printed at the end of the <code>stack.sh</code> run.</p>
+          <p>Also note that the <code>localrc</code> section is sourced as a shell script fragment and <strong>MUST</strong> conform to the shell requirements, specifically no whitespace around <code>=</code> (equals).</p>
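+          <p>For example, the first assignment below is valid in the <code>localrc</code> section while the commented variant would break sourcing (an illustrative fragment):</p>
+<pre>[[local|localrc]]
+FLOATING_RANGE=192.168.1.224/27
+# FLOATING_RANGE = 192.168.1.224/27  &lt;- invalid: whitespace around '='
+</pre>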
 
           <a id="minimal"></a>
           <h3>Minimal Configuration</h3>
@@ -205,14 +207,6 @@
 
           <h3>Examples</h3>
           <ul>
-            <li>Convert EXTRA_OPTS from (<code>localrc</code>):
-<pre>EXTRA_OPTS=api_rate_limit=False
-</pre>
-    to (<code>local.conf</code>):
-<pre>[[post-config|$NOVA_CONF]]
-[DEFAULT]
-api_rate_limit = False
-</pre></li>
             <li>Eliminate a Cinder pass-through (<code>CINDER_PERIODIC_INTERVAL</code>):
 <pre>[[post-config|$CINDER_CONF]]
 [DEFAULT]
diff --git a/docs/source/contributing.html b/docs/source/contributing.html
index 8dbd179..f3d4b5a 100644
--- a/docs/source/contributing.html
+++ b/docs/source/contributing.html
@@ -59,20 +59,21 @@
           <br /><strong>HACKING.rst</strong>
           <p>Like most OpenStack projects, DevStack includes a <code>HACKING.rst</code> file that describes the layout, style and conventions of the project.  Because <code>HACKING.rst</code> is in the main DevStack repo it is considered authoritative.  Much of the content on this page is taken from there.</p>
 
-          <br /><strong>bash8 Formatting</strong>
-          <p>Around the time of the OpenStack Havana release we added a tool to do style checking in DevStack similar to what pep8/flake8 do for Python projects.  It is still _very_ simplistic, focusing mostly on stray whitespace to help prevent -1 on reviews that are otherwise acceptable.  Oddly enough it is called <code>bash8</code>.  It will be expanded to enforce some of the documentation rules in comments that are used in formatting the script pages for devstack.org and possibly even simple code formatting.  Run it on the entire project with <code>./run_tests.sh</code>.</p>
+          <br /><strong>bashate Formatting</strong>
+          <p>Around the time of the OpenStack Havana release we added a tool to do style checking in DevStack similar to what pep8/flake8 do for Python projects.  It is still <em>very</em> simplistic, focusing mostly on stray whitespace to help prevent -1 on reviews that are otherwise acceptable.  Oddly enough it is called <code>bashate</code>.  It will be expanded to enforce some of the documentation rules in comments that are used in formatting the script pages for devstack.org and possibly even simple code formatting.  Run it on the entire project with <code>./run_tests.sh</code>.</p>
 
           <h3>Code</h3>
 
           <br /><strong>Repo Layout</strong>
           <p>The DevStack repo generally keeps all of the primary scripts at the root level.</p>
-          <p><code>exercises</code> - contains the test scripts used to validate and demonstrate some OpenStack functions.  These scripts know how to exit early or skip services that are not enabled.</p>
-          <p><code>extras.d</code> - contains the dispatch scripts called by the hooks in <code>stack.sh</code>, <code>unstack.sh</code> and <code>clean.sh</code>.  See <a href="plugins.html">the plugins docs</a> for more information.</p>
-          <p><code>files</code> - contains a variety of otherwise lost files used in configuring and operating DevStack.  This includes templates for configuration files and the system dependency information.  This is also where image files are downloaded and expanded if necessary.</p>
-          <p><code>lib</code> - contains the sub-scripts specific to each project.  This is where the work of managing a project's services is located.  Each top-level project (Keystone, Nova, etc) has a file here.  Additionally there are some for system services and project plugins.</p>
-          <p><code>samples</code> - contains a sample of the local files not included in the DevStack repo.</p>
+          <p><code>docs</code> - Contains the source for this website.  It is built using <code>tools/build_docs.sh</code>.</p>
+          <p><code>exercises</code> - Contains the test scripts used to validate and demonstrate some OpenStack functions.  These scripts know how to exit early or skip services that are not enabled.</p>
+          <p><code>extras.d</code> - Contains the dispatch scripts called by the hooks in <code>stack.sh</code>, <code>unstack.sh</code> and <code>clean.sh</code>; a sketch of the dispatch mechanism appears below.  See <a href="plugins.html">the plugins docs</a> for more information.</p>
+          <p><code>files</code> - Contains a variety of otherwise lost files used in configuring and operating DevStack.  This includes templates for configuration files and the system dependency information.  This is also where image files are downloaded and expanded if necessary.</p>
+          <p><code>lib</code> - Contains the sub-scripts specific to each project.  This is where the work of managing a project's services is located.  Each top-level project (Keystone, Nova, etc) has a file here.  Additionally there are some for system services and project plugins.</p>
+          <p><code>samples</code> - Contains a sample of the local files not included in the DevStack repo.</p>
           <p><code>tests</code> - the DevStack test suite is rather sparse, mostly consisting of test of specific fragile functions in the <code>functions</code> file.</p>
-          <p><code>tools</code> - contains a collection of stand-alone scripts, some of which have aged a bit (does anyone still do pamdisk installs?).  While these may reference the top-level DevStack configuration they can generally be run alone.  There are also some sub-directories to support specific environments such as XenServer and Docker.</p>
+          <p><code>tools</code> - Contains a collection of stand-alone scripts, some of which have aged a bit (does anyone still do ramdisk installs?).  While these may reference the top-level DevStack configuration they can generally be run alone.  There are also some sub-directories to support specific environments such as XenServer.</p>
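+          <p>As an aside, the <code>extras.d</code> dispatch mentioned above amounts to sourcing each script with the hook name and phase as positional arguments; a simplified sketch of the mechanism (not the exact code in <code>stack.sh</code>):</p>
+<pre>for i in $TOP_DIR/extras.d/*.sh; do
+    [[ -r $i ]] &amp;&amp; source $i stack post-config
+done
+</pre>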
           
 
 
diff --git a/docs/source/faq.html b/docs/source/faq.html
index bfac1dc..2c74a66 100644
--- a/docs/source/faq.html
+++ b/docs/source/faq.html
@@ -73,7 +73,7 @@
             <dd>A: DevStack is optimized for documentation &amp; developers.  As some of us use <a href="https://github.com/dellcloudedge/crowbar">Crowbar</a> for production deployments, we hope developers documenting how they setup systems for new features supports projects like Crowbar.</dd>
 
             <dt>Q: I'd like to help!</dt>
-            <dd>A: That isn't a question, but please do!  The source for DevStack is <a href="http://github.com/openstack-dev/devstack">github</a> and bug reports go to <a href="http://bugs.launchpad.net/devstack/">LaunchPad</a>.  Contributions follow the usual process as described in the <a href="http://wiki.openstack.org/HowToContribute">OpenStack wiki</a>. DevStack is not a core project but a gating project and therefore an official OpenStack project. This site is housed in the CloudBuilder's <a href="http://github.com/cloudbuilders/devstack">github</a> in the gh-pages branch.</dd>
+            <dd>A: That isn't a question, but please do!  The source for DevStack is on <a href="http://github.com/openstack-dev/devstack">GitHub</a> and bug reports go to <a href="http://bugs.launchpad.net/devstack/">LaunchPad</a>.  Contributions follow the usual process as described in the <a href="http://wiki.openstack.org/HowToContribute">OpenStack wiki</a> even though DevStack is not an official OpenStack project.  This site is housed in the CloudBuilders' <a href="http://github.com/cloudbuilders/devstack">github</a> in the gh-pages branch.</dd>
 
             <dt>Q: Why not use packages?</dt>
             <dd>A: Unlike packages, DevStack leaves your cloud ready to develop - checkouts of the code and services running in screen. However, many people are doing the hard work of packaging and recipes for production deployments.  We hope this script serves as a way to communicate configuration changes between developers and packagers.</dd>
@@ -85,7 +85,7 @@
             <dd>A: Fedora and CentOS/RHEL are supported via rpm dependency files and specific checks in <code>stack.sh</code>.  Support will follow the pattern set with the Ubuntu testing, i.e. only a single release of the distro will receive regular testing, others will be handled on a best-effort basis.</dd>
 
             <dt>Q: Are there any differences between Ubuntu and Fedora support?</dt>
-            <dd>A: LXC support is not complete on Fedora; Neutron is not fully supported prior to Fedora 18 due lack of OpenVSwitch packages.</dd>
+            <dd>A: Neutron is not fully supported prior to Fedora 18 due to the lack of Open vSwitch packages.</dd>
 
             <dt>Q: How about RHEL 6?</dt>
             <dd>A: RHEL 6 has Python 2.6 and many old modules packaged and is a challenge to support.  There are a number of specific RHEL6 work-arounds in <code>stack.sh</code> to handle this.  But the testing on py26 is valuable so we do it...</dd>
diff --git a/docs/source/guides/multinode-lab.html b/docs/source/guides/multinode-lab.html
index 28a6585..a286954 100644
--- a/docs/source/guides/multinode-lab.html
+++ b/docs/source/guides/multinode-lab.html
@@ -54,7 +54,7 @@
         </div>
         
         <h3>Minimal Install</h3>
-        <p>You need to have a fresh install of Linux on all of your nodes.  You can download the <a href="https://help.ubuntu.com/community/Installation/MinimalCD">Minimal CD</a> for Ubuntu 12.04 (only 27MB) since DevStack will download &amp; install all the additional dependencies.  The netinstall ISO is available for <a href="http://mirrors.kernel.org/fedora/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-netinst.iso">Fedora</a> and <a href="http://mirrors.kernel.org/centos/6.5/isos/x86_64/CentOS-6.5-x86_64-netinstall.iso">CentOS/RHEL</a>.</p>
+        <p>You need to have a system with a fresh install of Linux.  You can download the <a href="https://help.ubuntu.com/community/Installation/MinimalCD">Minimal CD</a> for Ubuntu releases since DevStack will download &amp; install all the additional dependencies.  The netinstall ISO is available for <a href="http://mirrors.kernel.org/fedora/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-netinst.iso">Fedora</a> and <a href="http://mirrors.kernel.org/centos/6.5/isos/x86_64/CentOS-6.5-x86_64-netinstall.iso">CentOS/RHEL</a>.</p>
 
         <p>Install a couple of packages to bootstrap configuration:</p>
         <pre>apt-get install -y git sudo || yum install -y git sudo</pre>
@@ -184,7 +184,13 @@
 MYSQL_HOST=192.168.42.11
 RABBIT_HOST=192.168.42.11
 GLANCE_HOSTPORT=192.168.42.11:9292
-ENABLED_SERVICES=n-cpu,n-net,n-api,c-sch,c-api,c-vol</pre>
+ENABLED_SERVICES=n-cpu,n-net,n-api,c-sch,c-api,c-vol
+NOVA_VNC_ENABLED=True
+NOVNCPROXY_URL="http://192.168.42.11:6080/vnc_auto.html"
+VNCSERVER_LISTEN=$HOST_IP
+VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
+</pre>
+
 <!-- save for vlan
 FLAT_INTERFACE=eth0.926
 -->
diff --git a/docs/source/guides/single-machine.html b/docs/source/guides/single-machine.html
index 2280793..ca9cafa 100644
--- a/docs/source/guides/single-machine.html
+++ b/docs/source/guides/single-machine.html
@@ -53,7 +53,7 @@
         </div>
         
         <h3>Minimal Install</h3>
-        <p>You need to have a system with a fresh install of Linux.  You can download the <a href="https://help.ubuntu.com/community/Installation/MinimalCD">Minimal CD</a> for Ubuntu 12.04 (only 27MB) since DevStack will download &amp; install all the additional dependencies.  The netinstall ISO is available for <a href="http://mirrors.kernel.org/fedora/releases/18/Fedora/x86_64/iso/Fedora-18-x86_64-netinst.iso">Fedora</a> and <a href="http://mirrors.kernel.org/centos/6.4/isos/x86_64/CentOS-6.4-x86_64-netinstall.iso">CentOS/RHEL</a>.  You may be tempted to use a desktop distro on a laptop, it will probably work but you may need to tell Network Manager to keep its fingers off the interface(s) that OpenStack uses for bridging.</p>
+        <p>You need to have a system with a fresh install of Linux.  You can download the <a href="https://help.ubuntu.com/community/Installation/MinimalCD">Minimal CD</a> for Ubuntu releases since DevStack will download &amp; install all the additional dependencies.  The netinstall ISO is available for <a href="http://mirrors.kernel.org/fedora/releases/20/Fedora/x86_64/iso/Fedora-20-x86_64-netinst.iso">Fedora</a> and <a href="http://mirrors.kernel.org/centos/6.5/isos/x86_64/CentOS-6.5-x86_64-netinstall.iso">CentOS/RHEL</a>.  You may be tempted to use a desktop distro on a laptop; it will probably work, but you may need to tell Network Manager to keep its fingers off the interface(s) that OpenStack uses for bridging.</p>
         
         <h3>Network Configuration</h3>
         <p>Determine the network configuration on the interface used to integrate your 
diff --git a/docs/source/index.html b/docs/source/index.html
index 71c8c98..dada57d 100644
--- a/docs/source/index.html
+++ b/docs/source/index.html
@@ -76,7 +76,7 @@
         <ol>
             <li value="0">
               <h3>Select a Linux Distribution</h3>
-              <p>Only Ubuntu 12.04 (Precise), Fedora 20 and CentOS/RHEL 6.5 are documented here.  OpenStack also runs and is packaged on other flavors of Linux such as OpenSUSE and Debian.</p>
+              <p>Only Ubuntu 14.04 (Trusty), Fedora 20 and CentOS/RHEL 6.5 are documented here.  OpenStack also runs and is packaged on other flavors of Linux such as OpenSUSE and Debian.</p>
             </li>
             <li>
               <h3>Install Selected OS</h3>
@@ -89,7 +89,7 @@
             </li>
             <li>
               <h3>Configure</h3>
-              <p>While optional, we recommend a <a href="configuration.html">minimal configuration</a> be set up as you may not want our default values for everything.</p>
+              <p>We recommend at least a <a href="configuration.html">minimal configuration</a> be set up.</p>
             </li>
             <li>
               <h3>Start the install</h3>
@@ -231,6 +231,10 @@
                 <td><a href="functions.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
               <tr>
+                <td>functions-common</td>
+                <td><a href="functions-common.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
                 <td>lib/apache</td>
                 <td><a href="lib/apache.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
@@ -283,8 +287,8 @@
                 <td><a href="lib/ldap.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
               <tr>
-                <td>lib/marconi</td>
-                <td><a href="lib/marconi.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+                <td>lib/zaqar</td>
+                <td><a href="lib/zaqar.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
               <tr>
                 <td>lib/neutron</td>
@@ -303,12 +307,12 @@
                 <td><a href="lib/rpc_backend.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
               <tr>
-                <td>lib/savanna</td>
-                <td><a href="lib/savanna.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+                <td>lib/sahara</td>
+                <td><a href="lib/sahara.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
               <tr>
-                <td>lib/savanna-dashboard</td>
-                <td><a href="lib/savanna-dashboard.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+                <td>lib/savanna</td>
+                <td><a href="lib/savanna.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
               <tr>
                 <td>lib/stackforge</td>
@@ -343,49 +347,34 @@
                 <td><a href="run_tests.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
               <tr>
-                <td>extras.d/70-marconi.sh</td>
-                <td><a href="extras.d/70-marconi.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+                <td>extras.d/50-ironic.sh</td>
+                <td><a href="extras.d/50-ironic.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>extras.d/70-zaqar.sh</td>
+                <td><a href="extras.d/70-zaqar.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>extras.d/70-sahara.sh</td>
+                <td><a href="extras.d/70-sahara.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
               <tr>
                 <td>extras.d/70-savanna.sh</td>
                 <td><a href="extras.d/70-savanna.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
               <tr>
+                <td>extras.d/70-trove.sh</td>
+                <td><a href="extras.d/70-trove.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>extras.d/80-opendaylight.sh</td>
+                <td><a href="extras.d/80-opendaylight.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
                 <td>extras.d/80-tempest.sh</td>
                 <td><a href="extras.d/80-tempest.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
-              <tr>
-                <td>tools/info.sh</td>
-                <td><a href="tools/info.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
-              </tr>
-              <tr>
-                <td>tools/build_docs.sh</td>
-                <td><a href="tools/build_docs.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
-              </tr>
-              <tr>
-                <td>tools/create_userrc.sh</td>
-                <td><a href="tools/create_userrc.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
-              </tr>
-              <tr>
-                <td>tools/fixup_stuff.sh</td>
-                <td><a href="tools/fixup_stuff.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
-              </tr>
-              <tr>
-                <td>tools/install_prereqs.sh</td>
-                <td><a href="tools/install_prereqs.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
-              </tr>
-              <tr>
-                <td>tools/install_pip.sh</td>
-                <td><a href="tools/install_pip.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
-              </tr>
-              <tr>
-                <td>tools/upload_image.sh</td>
-                <td><a href="tools/upload_image.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
-              </tr>
             </tbody>
-            <tfoot>
-              <td colspan="3">40 bash scripts</td>
-            </tfoot>
           </table>
         </div>
 
@@ -420,9 +409,46 @@
                 <td><a href="eucarc.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
             </tbody>
-            <tfoot>
-              <td colspan="3">5 configuration files</td>
-            </tfoot>
+          </table>
+
+          <h2>Tools <small>Support scripts</small></h2>
+          <table class='table table-striped table-bordered'>
+            <thead>
+              <tr>
+                <th>Filename</th>
+                <th>Link</th>
+              </tr>
+            </thead>
+            <tbody>
+              <tr>
+                <td>tools/info.sh</td>
+                <td><a href="tools/info.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>tools/build_docs.sh</td>
+                <td><a href="tools/build_docs.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>tools/create_userrc.sh</td>
+                <td><a href="tools/create_userrc.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>tools/fixup_stuff.sh</td>
+                <td><a href="tools/fixup_stuff.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>tools/install_prereqs.sh</td>
+                <td><a href="tools/install_prereqs.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>tools/install_pip.sh</td>
+                <td><a href="tools/install_pip.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>tools/upload_image.sh</td>
+                <td><a href="tools/upload_image.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+            </tbody>
           </table>
 
           <h2>Samples <small>Generated documentation of DevStack sample files.</small></h2>
@@ -443,9 +469,6 @@
                 <td><a href="samples/localrc.html" class="btn btn-small btn-success table-action">Read &raquo;</a></td>
               </tr>
             </tbody>
-            <tfoot>
-              <td colspan="3">2 sample files</td>
-            </tfoot>
           </table>
 
         <div class='row span5 pull-right'>
@@ -498,6 +521,12 @@
                 <td>exercises/neutron-adv-test.sh</td>
                 <td><a href="exercises/neutron-adv-test.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
+              <tr>
+                <td>exercises/sahara.sh</td>
+                <td><a href="exercises/sahara.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
+              <tr>
+                <td>exercises/savanna.sh</td>
+                <td><a href="exercises/savanna.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
               <tr>
                 <td>exercises/sec_groups.sh</td>
                 <td><a href="exercises/sec_groups.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
@@ -506,14 +535,18 @@
                 <td>exercises/swift.sh</td>
                 <td><a href="exercises/swift.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
+              <tr>
+                <td>exercises/trove.sh</td>
+                <td><a href="exercises/trove.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
               <tr>
                 <td>exercises/volumes.sh</td>
                 <td><a href="exercises/volumes.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
               </tr>
+              <tr>
+                <td>exercises/zaqar.sh</td>
+                <td><a href="exercises/zaqar.sh.html" class="btn btn-small btn-primary table-action">Read &raquo;</a></td>
+              </tr>
             </tbody>
-            <tfoot>
-              <td colspan="3">13 exercise scripts</td>
-            </tfoot>
           </table>
 
         </div>
diff --git a/docs/source/overview.html b/docs/source/overview.html
index c0b6ea2..baee400 100644
--- a/docs/source/overview.html
+++ b/docs/source/overview.html
@@ -47,8 +47,8 @@
 
         <div class='row pull-left'>
           <h2>Overview <small>DevStack from a cloud-height view</small></h2>
-          <p>DevStack is not and has never been intended to be a general OpenStack installer.  It has evolved to support a large number of configuration options and alternative platforms and support services.  However, that evolution has grown well beyond what was originally intended and the majority of configuration combinations are rarely, if ever, tested.  DevStack was never meant to be everything to everyone and can not continue in that direction.</p>
-          <p>Below is a list of what is specifically is supported (read that as "tested and assumed to work") going forward.</p>
+          <p>DevStack has evolved to support a large number of configuration options and alternative platforms and support services.  That evolution has grown well beyond what was originally intended and the majority of configuration combinations are rarely, if ever, tested.  DevStack is not a general OpenStack installer and was never meant to be everything to everyone.</p>
+          <p>Below is a list of what is specifically supported (read that as "tested") going forward.</p>
 
           <h2>Supported Components</h2>
 
@@ -93,7 +93,7 @@
           </ul>
 
           <h3>Services</h3>
-          <p>The default services configured by DevStack are Identity (Keystone), Object Storage (Swift), Image Storage (Glance), Block Storage (Cinder), Compute (Nova), Network (Nova), Dashboard (Horizon)</p>
+          <p>The default services configured by DevStack are Identity (Keystone), Object Storage (Swift), Image Storage (Glance), Block Storage (Cinder), Compute (Nova), Network (Nova), Dashboard (Horizon), and Orchestration (Heat).</p>
           <p>Additional services not included directly in DevStack can be tied in to <code>stack.sh</code> using the <a href="plugins.html">plugin mechanism</a> to call scripts that perform the configuration and startup of the service.</p>
 
           <h3>Node Configurations</h3>
@@ -103,7 +103,7 @@
           </ul>
 
           <h3>Exercises</h3>
-          <p>The DevStack exercise scripts have been replaced as integration and gating test with Tempest.  They will continue to be maintained as they are valuable as demonstrations of using OpenStack from the command line and for quick operational testing.</p>
+          <p>The DevStack exercise scripts are no longer used for integration and gate testing, as that job has transitioned to Tempest.  They are still maintained as demonstrations of using OpenStack from the command line and for quick operational testing.</p>
 
         </div>        
 
diff --git a/docs/source/plugins.html b/docs/source/plugins.html
index 85cf8e4..3327128 100644
--- a/docs/source/plugins.html
+++ b/docs/source/plugins.html
@@ -67,7 +67,12 @@
         source $TOP_DIR/lib/template
     fi
 
-    if [[ "$1" == "stack" && "$2" == "install" ]]; then
+    if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
+        # Set up system services
+        echo_summary "Configuring system services Template"
+        install_package cowsay
+
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
         # Perform installation of service source
         echo_summary "Installing Template"
         install_template
@@ -103,6 +108,7 @@
             <li><strong>source</strong> - Called by each script that utilizes <code>extras.d</code> hooks; this replaces directly sourcing the <code>lib/*</code> script.</li>
             <li><strong>stack</strong> - Called by <code>stack.sh</code> three times for different phases of its run:
               <ul>
+                <li><strong>pre-install</strong> - Called after system (OS) setup is complete and before project source is installed.</li>
                 <li><strong>install</strong> - Called after the layer 1 and 2 projects source and their dependencies have been installed.</li>
                 <li><strong>post-config</strong> - Called after the layer 1 and 2 services have been configured.  All configuration files for enabled services should exist at this point.</li>
                 <li><strong>extra</strong> - Called near the end after layer 1 and 2 services have been started.  This is the existing hook and has not otherwise changed.</li>
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index d756685..a2ae275 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -71,10 +71,10 @@
 # ------
 
 # List the images available
-glance image-list
+openstack image list
 
 # Grab the id of the image to launch
-IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
 die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index b360f1e..2f85d98 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -122,7 +122,7 @@
         STATUS_GLANCE="Skipped"
     else
         echo -e "\nTest Glance"
-        if glance $TENANT_ARG $ARGS image-list; then
+        if openstack $TENANT_ARG $ARGS image list; then
             STATUS_GLANCE="Succeeded"
         else
             STATUS_GLANCE="Failed"
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index cc518d9..4a0609a 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -132,7 +132,7 @@
         STATUS_GLANCE="Skipped"
     else
         echo -e "\nTest Glance"
-        if glance image-list; then
+        if openstack image list; then
             STATUS_GLANCE="Succeeded"
         else
             STATUS_GLANCE="Failed"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 7e90e5a..57f48e0 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -71,10 +71,10 @@
 # ------
 
 # List the images available
-glance image-list
+openstack image list
 
 # Grab the id of the image to launch
-IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
 die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 6679670..5b3281b 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -134,7 +134,7 @@
 }
 
 function get_image_id {
-    local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+    local IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
     die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
     echo "$IMAGE_ID"
 }
diff --git a/exercises/trove.sh b/exercises/trove.sh
index d48d5fe..053f872 100755
--- a/exercises/trove.sh
+++ b/exercises/trove.sh
@@ -35,8 +35,12 @@
 
 is_service_enabled trove || exit 55
 
-# can we get a list versions
-curl http://$SERVICE_HOST:8779/ 2>/dev/null | grep -q 'versions' || die $LINENO "Trove API not functioning!"
+# Try to fetch a datastore id as a sanity check (skip the table header rows)
+DSTORE_ID=$(trove datastore-list | tail -n +4 | head -3 | get_field 1)
+die_if_not_set $LINENO DSTORE_ID "Trove API not functioning!"
+
+DV_ID=$(trove datastore-version-list $DSTORE_ID | tail -n +4 | get_field 1)
+die_if_not_set $LINENO DV_ID "Trove API not functioning!"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 1dff6a4..504fba1 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -68,10 +68,10 @@
 # ------
 
 # List the images available
-glance image-list
+openstack image list
 
 # Grab the id of the image to launch
-IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
 die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
diff --git a/exercises/marconi.sh b/exercises/zaqar.sh
similarity index 86%
rename from exercises/marconi.sh
rename to exercises/zaqar.sh
index 9d83a99..6996f34 100755
--- a/exercises/marconi.sh
+++ b/exercises/zaqar.sh
@@ -1,8 +1,8 @@
 #!/usr/bin/env bash
 
-# **marconi.sh**
+# **zaqar.sh**
 
-# Sanity check that Marconi started if enabled
+# Sanity check that Zaqar started if enabled
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
@@ -33,9 +33,9 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
-is_service_enabled marconi-server || exit 55
+is_service_enabled zaqar-server || exit 55
 
-curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Marconi API not functioning!"
+curl http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Zaqar API not functioning!"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/extras.d/40-dib.sh b/extras.d/40-dib.sh
new file mode 100644
index 0000000..fdae011
--- /dev/null
+++ b/extras.d/40-dib.sh
@@ -0,0 +1,27 @@
+# dib.sh - Devstack extras script to install diskimage-builder
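+#
+# stack.sh sources this dispatch script several times, passing the hook
+# name and phase as $1/$2 (e.g. "stack install", "unstack", "clean"),
+# which is what the branches below switch on.  To exercise it, enable
+# the service in local.conf, e.g.:
+#   enable_service dib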
+
+if is_service_enabled dib; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/dib
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        echo_summary "Installing diskimage-builder"
+        install_dib
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
new file mode 100644
index 0000000..5fb34ea
--- /dev/null
+++ b/extras.d/60-ceph.sh
@@ -0,0 +1,44 @@
+# ceph.sh - DevStack extras script to install Ceph
+
+if is_service_enabled ceph; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/ceph
+    elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
+        echo_summary "Installing Ceph"
+        install_ceph
+        echo_summary "Configuring Ceph"
+        configure_ceph
+        # NOTE (leseb): Do everything here because we need to have Ceph started before the main
+        # OpenStack components.  Ceph OSDs must start here, otherwise we can't upload any images.
+        echo_summary "Initializing Ceph"
+        init_ceph
+        start_ceph
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        if is_service_enabled glance; then
+            echo_summary "Configuring Glance for Ceph"
+            configure_ceph_glance
+        fi
+        if is_service_enabled nova; then
+            echo_summary "Configuring Nova for Ceph"
+            configure_ceph_nova
+        fi
+        if is_service_enabled cinder; then
+            echo_summary "Configuring Cinder for Ceph"
+            configure_ceph_cinder
+            # NOTE (leseb): the part below is a requirement from Cinder in order to attach volumes,
+            # so we run the following within the if statement.
+            echo_summary "Configuring libvirt secret"
+            import_libvirt_secret_ceph
+        fi
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_ceph
+        cleanup_ceph
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        cleanup_ceph
+    fi
+fi
diff --git a/extras.d/70-marconi.sh b/extras.d/70-marconi.sh
deleted file mode 100644
index a96a4c5..0000000
--- a/extras.d/70-marconi.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-# marconi.sh - Devstack extras script to install Marconi
-
-if is_service_enabled marconi-server; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/marconi
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing Marconi"
-        install_marconiclient
-        install_marconi
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring Marconi"
-        configure_marconi
-        configure_marconiclient
-
-        if is_service_enabled key; then
-            create_marconi_accounts
-        fi
-
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        echo_summary "Initializing Marconi"
-        init_marconi
-        start_marconi
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_marconi
-    fi
-fi
diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh
index 80e07ff..2a34999 100644
--- a/extras.d/70-sahara.sh
+++ b/extras.d/70-sahara.sh
@@ -4,21 +4,15 @@
     if [[ "$1" == "source" ]]; then
         # Initial source
         source $TOP_DIR/lib/sahara
-        source $TOP_DIR/lib/sahara-dashboard
     elif [[ "$1" == "stack" && "$2" == "install" ]]; then
         echo_summary "Installing sahara"
         install_sahara
+        install_python_saharaclient
         cleanup_sahara
-        if is_service_enabled horizon; then
-            install_sahara_dashboard
-        fi
     elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
         echo_summary "Configuring sahara"
         configure_sahara
         create_sahara_accounts
-        if is_service_enabled horizon; then
-            configure_sahara_dashboard
-        fi
     elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
         echo_summary "Initializing sahara"
         start_sahara
@@ -26,9 +20,6 @@
 
     if [[ "$1" == "unstack" ]]; then
         stop_sahara
-        if is_service_enabled horizon; then
-            cleanup_sahara_dashboard
-        fi
     fi
 
     if [[ "$1" == "clean" ]]; then
diff --git a/extras.d/70-zaqar.sh b/extras.d/70-zaqar.sh
new file mode 100644
index 0000000..63c4fd5
--- /dev/null
+++ b/extras.d/70-zaqar.sh
@@ -0,0 +1,29 @@
+# zaqar.sh - Devstack extras script to install Zaqar
+
+if is_service_enabled zaqar-server; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/zaqar
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        echo_summary "Installing Zaqar"
+        install_zaqarclient
+        install_zaqar
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        echo_summary "Configuring Zaqar"
+        configure_zaqar
+        configure_zaqarclient
+
+        if is_service_enabled key; then
+            create_zaqar_accounts
+        fi
+
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing Zaqar"
+        init_zaqar
+        start_zaqar
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_zaqar
+    fi
+fi
diff --git a/files/apache-ceilometer.template b/files/apache-ceilometer.template
new file mode 100644
index 0000000..1c57b32
--- /dev/null
+++ b/files/apache-ceilometer.template
@@ -0,0 +1,15 @@
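+# The %VAR% tokens below (%PORT%, %USER%, %APACHE_NAME%, ...) are
+# placeholders that DevStack substitutes when it installs this template
+# (see the corresponding lib/* configure function; noted here for clarity).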
+Listen %PORT%
+
+<VirtualHost *:%PORT%>
+    WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP}
+    WSGIProcessGroup ceilometer-api
+    WSGIScriptAlias / %WSGIAPP%
+    WSGIApplicationGroup %{GLOBAL}
+    <IfVersion >= 2.4>
+        ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/ceilometer.log
+    CustomLog /var/log/%APACHE_NAME%/ceilometer_access.log combined
+</VirtualHost>
+
+WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/files/apache-dib-pip-repo.template b/files/apache-dib-pip-repo.template
new file mode 100644
index 0000000..5d2379b
--- /dev/null
+++ b/files/apache-dib-pip-repo.template
@@ -0,0 +1,15 @@
+Listen %DIB_PIP_REPO_PORT%
+
+<VirtualHost *:%DIB_PIP_REPO_PORT%>
+    DocumentRoot %DIB_PIP_REPO%
+    <Directory %DIB_PIP_REPO%>
+        DirectoryIndex index.html
+        Require all granted
+        Order allow,deny
+        allow from all
+    </Directory>
+
+    ErrorLog /var/log/%APACHE_NAME%/dib_pip_repo_error.log
+    LogLevel warn
+    CustomLog /var/log/%APACHE_NAME%/dib_pip_repo_access.log combined
+</VirtualHost>
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index af880c4..bca1251 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -1,6 +1,6 @@
 <VirtualHost *:80>
     WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
-    WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR%
+    WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP}
     WSGIApplicationGroup %{GLOBAL}
 
     SetEnv APACHE_RUN_USER %USER%
@@ -17,10 +17,16 @@
 
     <Directory %HORIZON_DIR%/>
         Options Indexes FollowSymLinks MultiViews
-        %HORIZON_REQUIRE%
         AllowOverride None
-        Order allow,deny
-        allow from all
+        # Apache 2.4 uses mod_authz_host for access control now (instead of
+        #  "Allow")
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
     </Directory>
 
     ErrorLog /var/log/%APACHE_NAME%/horizon_error.log
diff --git a/files/apache-ironic.template b/files/apache-ironic.template
new file mode 100644
index 0000000..8864194
--- /dev/null
+++ b/files/apache-ironic.template
@@ -0,0 +1,12 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    DocumentRoot "%HTTPROOT%"
+    <Directory "%HTTPROOT%">
+        Options Indexes FollowSymLinks
+        AllowOverride None
+        Order allow,deny
+        Allow from all
+        Require all granted
+    </Directory>
+</VirtualHost>
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 919452a..88492d3 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -2,21 +2,35 @@
 Listen %ADMINPORT%
 
 <VirtualHost *:%PUBLICPORT%>
-    WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER%
+    WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP}
     WSGIProcessGroup keystone-public
     WSGIScriptAlias / %PUBLICWSGI%
     WSGIApplicationGroup %{GLOBAL}
-    ErrorLog /var/log/%APACHE_NAME%/keystone
-    LogLevel debug
-    CustomLog /var/log/%APACHE_NAME%/access.log combined
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/keystone.log
+    CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
 </VirtualHost>
 
 <VirtualHost *:%ADMINPORT%>
-    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER%
+    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% display-name=%{GROUP}
     WSGIProcessGroup keystone-admin
     WSGIScriptAlias / %ADMINWSGI%
     WSGIApplicationGroup %{GLOBAL}
-    ErrorLog /var/log/%APACHE_NAME%/keystone
-    LogLevel debug
-    CustomLog /var/log/%APACHE_NAME%/access.log combined
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/keystone.log
+    CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
 </VirtualHost>
+
+# Workaround for missing path on RHEL6, see
+#  https://bugzilla.redhat.com/show_bug.cgi?id=1121019
+WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/files/apts/ceph b/files/apts/ceph
new file mode 100644
index 0000000..69863ab
--- /dev/null
+++ b/files/apts/ceph
@@ -0,0 +1,2 @@
+ceph    # NOPRIME
+xfsprogs
diff --git a/files/apts/general b/files/apts/general
index f3cab59..c308c46 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -7,6 +7,7 @@
 psmisc
 gcc
 git
+graphviz # testonly - docs
 lsof # useful when debugging
 openssh-server
 openssl
@@ -24,3 +25,5 @@
 python2.7
 bc
 libyaml-dev
+libffi-dev
+libssl-dev # for pyOpenSSL
diff --git a/files/apts/glance b/files/apts/glance
index b5d8c77..15e09aa 100644
--- a/files/apts/glance
+++ b/files/apts/glance
@@ -1,4 +1,3 @@
-libffi-dev
 libmysqlclient-dev  # testonly
 libpq-dev           # testonly
 libssl-dev          # testonly
diff --git a/files/apts/heat b/files/apts/heat
new file mode 100644
index 0000000..1ecbc78
--- /dev/null
+++ b/files/apts/heat
@@ -0,0 +1 @@
+gettext # dist:trusty
diff --git a/files/apts/horizon b/files/apts/horizon
index 8969046..03df3cb 100644
--- a/files/apts/horizon
+++ b/files/apts/horizon
@@ -9,13 +9,11 @@
 python-xattr
 python-sqlalchemy
 python-webob
-python-kombu
 pylint
 python-eventlet
 python-nose
 python-sphinx
 python-mox
-python-kombu
 python-coverage
 python-cherrypy3 # why?
 python-migrate
diff --git a/files/apts/ironic b/files/apts/ironic
index fe9c07f..283d1b2 100644
--- a/files/apts/ironic
+++ b/files/apts/ironic
@@ -1,5 +1,7 @@
+docker.io
 ipmitool
 iptables
+ipxe
 libguestfs0
 libvirt-bin
 openssh-client
diff --git a/files/apts/keystone b/files/apts/keystone
index b7218b7..d316a42 100644
--- a/files/apts/keystone
+++ b/files/apts/keystone
@@ -6,6 +6,7 @@
 python-pysqlite2
 python-sqlalchemy
 python-mysqldb
+python-mysql.connector
 python-webob
 python-greenlet
 python-routes
diff --git a/files/apts/neutron b/files/apts/neutron
index 648716a..a48a800 100644
--- a/files/apts/neutron
+++ b/files/apts/neutron
@@ -2,24 +2,25 @@
 iptables
 iputils-ping
 iputils-arping
+libmysqlclient-dev  # testonly
 mysql-server #NOPRIME
 sudo
-python-boto
 python-iso8601
 python-paste
 python-routes
 python-suds
 python-pastedeploy
 python-greenlet
-python-kombu
 python-eventlet
 python-sqlalchemy
 python-mysqldb
+python-mysql.connector
 python-pyudev
-python-qpid # dist:precise
+python-qpid # NOPRIME
 dnsmasq-base
 dnsmasq-utils # for dhcp_release only available in dist:precise
 rabbitmq-server # NOPRIME
 qpidd # NOPRIME
 sqlite3
 vlan
+radvd # NOPRIME
diff --git a/files/apts/nova b/files/apts/nova
index 38c99c7..a3b0cb1 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -1,10 +1,13 @@
 dnsmasq-base
 dnsmasq-utils # for dhcp_release
+conntrack
 kpartx
 parted
 iputils-arping
+libmysqlclient-dev  # testonly
 mysql-server # NOPRIME
 python-mysqldb
+python-mysql.connector
 python-xattr # needed for glance which is needed for nova --- this shouldn't be here
 python-lxml # needed for glance which is needed for nova --- this shouldn't be here
 gawk
@@ -21,7 +24,7 @@
 curl
 genisoimage # required for config_drive
 rabbitmq-server # NOPRIME
-qpidd # dist:precise NOPRIME
+qpidd # NOPRIME
 socat # used by ajaxterm
 python-mox
 python-paste
@@ -39,8 +42,6 @@
 python-suds
 python-lockfile
 python-m2crypto
-python-boto
-python-kombu
 python-feedparser
 python-iso8601
-python-qpid # dist:precise
+python-qpid # NOPRIME
diff --git a/files/apts/q-agt b/files/apts/q-agt
new file mode 100644
index 0000000..ea8819e
--- /dev/null
+++ b/files/apts/q-agt
@@ -0,0 +1 @@
+ipset
diff --git a/files/apts/q-l3 b/files/apts/q-l3
new file mode 100644
index 0000000..b98b628
--- /dev/null
+++ b/files/apts/q-l3
@@ -0,0 +1,2 @@
+conntrackd
+keepalived
diff --git a/files/apts/swift b/files/apts/swift
index 080ecdb..fd51699 100644
--- a/files/apts/swift
+++ b/files/apts/swift
@@ -1,5 +1,4 @@
 curl
-libffi-dev
 memcached
 python-configobj
 python-coverage
diff --git a/files/apts/marconi-server b/files/apts/zaqar-server
similarity index 100%
rename from files/apts/marconi-server
rename to files/apts/zaqar-server
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index ff00e38..9016355 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -12,12 +12,6 @@
 catalog.RegionOne.compute.name = Compute Service
 
 
-catalog.RegionOne.computev3.publicURL = http://%SERVICE_HOST%:8774/v3
-catalog.RegionOne.computev3.adminURL = http://%SERVICE_HOST%:8774/v3
-catalog.RegionOne.computev3.internalURL = http://%SERVICE_HOST%:8774/v3
-catalog.RegionOne.computev3.name = Compute Service V3
-
-
 catalog.RegionOne.volume.publicURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
 catalog.RegionOne.volume.adminURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
 catalog.RegionOne.volume.internalURL = http://%SERVICE_HOST%:8776/v1/$(tenant_id)s
diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph
new file mode 100644
index 0000000..8d46500
--- /dev/null
+++ b/files/rpms-suse/ceph
@@ -0,0 +1,3 @@
+ceph    # NOPRIME
+xfsprogs
+lsb
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 82cb09d..0a4746f 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -5,6 +5,7 @@
 euca2ools
 gcc
 git-core
+graphviz # testonly - docs
 iputils
 libopenssl-devel # to rebuild pyOpenSSL if needed
 lsof # useful when debugging
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
index d3bde26..fa7e439 100644
--- a/files/rpms-suse/horizon
+++ b/files/rpms-suse/horizon
@@ -12,7 +12,6 @@
 python-coverage
 python-dateutil
 python-eventlet
-python-kombu
 python-mox
 python-nose
 python-pylint
diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone
index 403d82f..4c37ade 100644
--- a/files/rpms-suse/keystone
+++ b/files/rpms-suse/keystone
@@ -10,5 +10,6 @@
 python-greenlet
 python-lxml
 python-mysql
+python-mysql-connector-python
 python-pysqlite
 sqlite3
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index d4841b1..8431bd1 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -4,12 +4,11 @@
 iptables
 iputils
 mariadb # NOPRIME
-python-boto
 python-eventlet
 python-greenlet
 python-iso8601
-python-kombu
 python-mysql
+python-mysql-connector-python
 python-Paste
 python-PasteDeploy
 python-pyudev
@@ -20,6 +19,7 @@
 sqlite3
 sudo
 vlan
+radvd # NOPRIME
 
 # FIXME: qpid is not part of openSUSE, those names are tentative
 python-qpid # NOPRIME
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 3e95724..b1c4f6a 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -1,6 +1,7 @@
 curl
 dnsmasq
 dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
+conntrack-tools
 ebtables
 gawk
 genisoimage # required for config_drive
@@ -22,18 +23,17 @@
 python-Routes
 python-SQLAlchemy
 python-Tempita
-python-boto
 python-cheetah
 python-eventlet
 python-feedparser
 python-greenlet
 python-iso8601
-python-kombu
 python-libxml2
 python-lockfile
 python-lxml # needed for glance which is needed for nova --- this shouldn't be here
 python-mox
 python-mysql
+python-mysql-connector-python
 python-numpy # needed by websockify for spice console
 python-paramiko
 python-sqlalchemy-migrate
diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/q-agt
new file mode 100644
index 0000000..ea8819e
--- /dev/null
+++ b/files/rpms-suse/q-agt
@@ -0,0 +1 @@
+ipset
diff --git a/files/rpms/ceph b/files/rpms/ceph
new file mode 100644
index 0000000..5483735
--- /dev/null
+++ b/files/rpms/ceph
@@ -0,0 +1,3 @@
+ceph    # NOPRIME
+xfsprogs
+redhat-lsb-core
diff --git a/files/rpms/general b/files/rpms/general
index a0074dd..7a35961 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -4,9 +4,11 @@
 euca2ools # only for testing client
 gcc
 git-core
+graphviz # testonly - docs
 openssh-server
 openssl
 openssl-devel # to rebuild pyOpenSSL if needed
+libffi-devel
 libxml2-devel
 libxslt-devel
 psmisc
diff --git a/files/rpms/glance b/files/rpms/glance
index fc07fa7..5a7f073 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -1,4 +1,3 @@
-libffi-devel
 libxml2-devel       # testonly
 libxslt-devel       # testonly
 mysql-devel         # testonly
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 92afed2..fe3a2f4 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -4,13 +4,11 @@
 pylint
 python-anyjson
 python-BeautifulSoup
-python-boto
 python-coverage
 python-dateutil
 python-eventlet
 python-greenlet
 python-httplib2
-python-kombu
 python-migrate
 python-mox
 python-nose
diff --git a/files/rpms/ironic b/files/rpms/ironic
index 0c81081..e646f3a 100644
--- a/files/rpms/ironic
+++ b/files/rpms/ironic
@@ -1,5 +1,7 @@
+docker-io
 ipmitool
 iptables
+ipxe-bootimgs
 libguestfs
 libvirt
 libvirt-python
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 7182091..ce41ee5 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,3 +1,4 @@
+MySQL-python
 python-greenlet
 libxslt-devel       # dist:f20
 python-lxml         #dist:f19,f20
@@ -8,5 +9,6 @@
 python-sqlalchemy
 python-webob
 sqlite
+mod_ssl
 
 # Deps installed via pip for RHEL
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 9fafecb..2c9dd3d 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -1,19 +1,20 @@
 MySQL-python
+dnsmasq # for q-dhcp
 dnsmasq-utils # for dhcp_release
 ebtables
 iptables
 iputils
+mysql-connector-python
+mysql-devel  # testonly
 mysql-server # NOPRIME
 openvswitch # NOPRIME
-python-boto
 python-eventlet
 python-greenlet
 python-iso8601
-python-kombu
 #rhel6 gets via pip
 python-paste        # dist:f19,f20,rhel7
 python-paste-deploy # dist:f19,f20,rhel7
-python-qpid
+python-qpid # NOPRIME
 python-routes
 python-sqlalchemy
 python-suds
@@ -21,3 +22,4 @@
 qpid-cpp-server        # NOPRIME
 sqlite
 sudo
+radvd # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index e05d0d7..dc1944b 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -1,6 +1,8 @@
 MySQL-python
 curl
+dnsmasq # for nova-network
 dnsmasq-utils # for dhcp_release
+conntrack-tools
 ebtables
 gawk
 genisoimage # required for config_drive
@@ -13,16 +15,16 @@
 libxml2-python
 numpy # needed by websockify for spice console
 m2crypto
+mysql-connector-python
+mysql-devel  # testonly
 mysql-server # NOPRIME
 parted
 polkit
-python-boto
 python-cheetah
 python-eventlet
 python-feedparser
 python-greenlet
 python-iso8601
-python-kombu
 python-lockfile
 python-migrate
 python-mox
@@ -31,7 +33,7 @@
 # pip we need
 python-paste        # dist:f19,f20,rhel7
 python-paste-deploy # dist:f19,f20,rhel7
-python-qpid
+python-qpid # NOPRIME
 python-routes
 python-sqlalchemy
 python-suds
diff --git a/files/rpms/q-agt b/files/rpms/q-agt
new file mode 100644
index 0000000..ea8819e
--- /dev/null
+++ b/files/rpms/q-agt
@@ -0,0 +1 @@
+ipset
diff --git a/files/rpms/q-l3 b/files/rpms/q-l3
new file mode 100644
index 0000000..a7a190c
--- /dev/null
+++ b/files/rpms/q-l3
@@ -0,0 +1,2 @@
+conntrack-tools
+keepalived
diff --git a/files/rpms/qpid b/files/rpms/qpid
new file mode 100644
index 0000000..62148ba
--- /dev/null
+++ b/files/rpms/qpid
@@ -0,0 +1,3 @@
+qpid-proton-c-devel # NOPRIME
+python-qpid-proton # NOPRIME
+
diff --git a/files/rpms/swift b/files/rpms/swift
index 938d2c8..9ec4aab 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,5 +1,4 @@
 curl
-libffi-devel
 memcached
 python-configobj
 python-coverage
diff --git a/files/rpms/marconi-server b/files/rpms/zaqar-server
similarity index 100%
rename from files/rpms/marconi-server
rename to files/rpms/zaqar-server
diff --git a/functions b/functions
index ca8ef80..376aff0 100644
--- a/functions
+++ b/functions
@@ -21,18 +21,6 @@
     declare -f -F $1 > /dev/null
 }
 
-# Checks if installed Apache is <= given version
-# $1 = x.y.z (version string of Apache)
-function check_apache_version {
-    local cmd="apachectl"
-    if ! [[ -x $(which apachectl 2>/dev/null) ]]; then
-        cmd="/usr/sbin/apachectl"
-    fi
-
-    local version=$($cmd -v | grep version | grep -Po 'Apache/\K[^ ]*')
-    expr "$version" '>=' $1 > /dev/null
-}
-
 
 # Cleanup anything from /tmp on unstack
 # clean_tmp
@@ -55,26 +43,28 @@
     local image_url=$1
     local token=$2
 
+    local image image_fname image_name
+
     # Create a directory for the downloaded image tarballs.
     mkdir -p $FILES/images
-    IMAGE_FNAME=`basename "$image_url"`
+    image_fname=`basename "$image_url"`
     if [[ $image_url != file* ]]; then
         # Downloads the image (uec ami+akistyle), then extracts it.
-        if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
-            wget -c $image_url -O $FILES/$IMAGE_FNAME
+        if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
+            wget -c $image_url -O $FILES/$image_fname
             if [[ $? -ne 0 ]]; then
                 echo "Not found: $image_url"
                 return
             fi
         fi
-        IMAGE="$FILES/${IMAGE_FNAME}"
+        image="$FILES/${image_fname}"
     else
         # File based URL (RFC 1738): file://host/path
         # Remote files are not considered here.
         # *nix: file:///home/user/path/file
         # windows: file:///C:/Documents%20and%20Settings/user/path/file
-        IMAGE=$(echo $image_url | sed "s/^file:\/\///g")
-        if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then
+        image=$(echo $image_url | sed "s/^file:\/\///g")
+        if [[ ! -f $image || "$(stat -c "%s" $image)" == "0" ]]; then
             echo "Not found: $image_url"
             return
         fi
@@ -82,14 +72,14 @@
 
     # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
     if [[ "$image_url" =~ 'openvz' ]]; then
-        IMAGE_NAME="${IMAGE_FNAME%.tar.gz}"
-        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format ami --disk-format ami < "${IMAGE}"
+        image_name="${image_fname%.tar.gz}"
+        openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" --public --container-format ami --disk-format ami < "${image}"
         return
     fi
 
     # vmdk format images
     if [[ "$image_url" =~ '.vmdk' ]]; then
-        IMAGE_NAME="${IMAGE_FNAME%.vmdk}"
+        image_name="${image_fname%.vmdk}"
 
         # Before we can upload vmdk type images to glance, we need to know it's
         # disk type, storage adapter, and networking adapter. These values are
@@ -102,17 +92,17 @@
         # If the filename does not follow the above format then the vsphere
         # driver will supply default values.
 
-        vmdk_adapter_type=""
-        vmdk_disktype=""
-        vmdk_net_adapter=""
+        local vmdk_disktype=""
+        local vmdk_net_adapter=""
+        local path_len
 
         # vmdk adapter type
-        vmdk_adapter_type="$(head -25 $IMAGE | { grep -a -F -m 1 'ddb.adapterType =' $IMAGE || true; })"
+        local vmdk_adapter_type="$(head -25 $image | { grep -a -F -m 1 'ddb.adapterType =' $image || true; })"
         vmdk_adapter_type="${vmdk_adapter_type#*\"}"
         vmdk_adapter_type="${vmdk_adapter_type%?}"
 
         # vmdk disk type
-        vmdk_create_type="$(head -25 $IMAGE | { grep -a -F -m 1 'createType=' $IMAGE || true; })"
+        local vmdk_create_type="$(head -25 $image | { grep -a -F -m 1 'createType=' $image || true; })"
         vmdk_create_type="${vmdk_create_type#*\"}"
         vmdk_create_type="${vmdk_create_type%\"*}"
 
@@ -120,17 +110,16 @@
                                     `"should use a descriptor-data pair."
         if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then
             vmdk_disktype="sparse"
-        elif [[ "$vmdk_create_type" = "monolithicFlat" || \
-        "$vmdk_create_type" = "vmfs" ]]; then
+        elif [[ "$vmdk_create_type" = "monolithicFlat" || "$vmdk_create_type" = "vmfs" ]]; then
             # Attempt to retrieve the *-flat.vmdk
-            flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })"
+            local flat_fname="$(head -25 $image | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $image || true; })"
             flat_fname="${flat_fname#*\"}"
             flat_fname="${flat_fname%?}"
             if [[ -z "$flat_fname" ]]; then
-                flat_fname="$IMAGE_NAME-flat.vmdk"
+                flat_fname="$image_name-flat.vmdk"
             fi
-            path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
-            flat_url="${image_url:0:$path_len}$flat_fname"
+            path_len=`expr ${#image_url} - ${#image_fname}`
+            local flat_url="${image_url:0:$path_len}$flat_fname"
             warn $LINENO "$descriptor_data_pair_msg"`
                             `" Attempt to retrieve the *-flat.vmdk: $flat_url"
             if [[ $flat_url != file* ]]; then
@@ -138,29 +127,29 @@
                 "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
                     wget -c $flat_url -O $FILES/$flat_fname
                 fi
-                IMAGE="$FILES/${flat_fname}"
+                image="$FILES/${flat_fname}"
             else
-                IMAGE=$(echo $flat_url | sed "s/^file:\/\///g")
-                if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then
+                image=$(echo $flat_url | sed "s/^file:\/\///g")
+                if [[ ! -f $image || "$(stat -c "%s" $image)" == "0" ]]; then
                     echo "Flat disk not found: $flat_url"
                     return 1
                 fi
             fi
-            IMAGE_NAME="${flat_fname}"
+            image_name="${flat_fname}"
             vmdk_disktype="preallocated"
         elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then
             vmdk_disktype="streamOptimized"
         elif [[ -z "$vmdk_create_type" ]]; then
             # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk)
             # to retrieve appropriate metadata
-            if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
+            if [[ ${image_name: -5} != "-flat" ]]; then
                 warn $LINENO "Expected filename suffix: '-flat'."`
-                            `" Filename provided: ${IMAGE_NAME}"
+                            `" Filename provided: ${image_name}"
             else
-                descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
-                path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
-                flat_path="${image_url:0:$path_len}"
-                descriptor_url=$flat_path$descriptor_fname
+                descriptor_fname="${image_name:0:${#image_name} - 5}.vmdk"
+                path_len=`expr ${#image_url} - ${#image_fname}`
+                local flat_path="${image_url:0:$path_len}"
+                local descriptor_url=$flat_path$descriptor_fname
                 warn $LINENO "$descriptor_data_pair_msg"`
                                 `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
                 if [[ $flat_path != file* ]]; then
@@ -189,35 +178,35 @@
         # NOTE: For backwards compatibility reasons, colons may be used in place
         # of semi-colons for property delimiters but they are not permitted
         # characters in NTFS filesystems.
-        property_string=`echo "$IMAGE_NAME" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }`
+        property_string=`echo "$image_name" | { grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$' || true; }`
         IFS=':;' read -a props <<< "$property_string"
         vmdk_disktype="${props[0]:-$vmdk_disktype}"
         vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
         vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}"
 
-        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}"
+        openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}"
         return
     fi
 
     # XenServer-vhd-ovf-format images are provided as .vhd.tgz
     # and should not be decompressed prior to loading
     if [[ "$image_url" =~ '.vhd.tgz' ]]; then
-        IMAGE_NAME="${IMAGE_FNAME%.vhd.tgz}"
-        FORCE_VM_MODE=""
-        if [[ "$IMAGE_NAME" =~ 'cirros' ]]; then
+        image_name="${image_fname%.vhd.tgz}"
+        local force_vm_mode=""
+        if [[ "$image_name" =~ 'cirros' ]]; then
             # Cirros VHD image currently only boots in PV mode.
             # Nova defaults to PV for all VHD images, but
             # the glance setting is needed for booting
             # directly from volume.
-            FORCE_VM_MODE="--property vm_mode=xen"
+            force_vm_mode="--property vm_mode=xen"
         fi
-        glance \
-            --os-auth-token $token \
-            --os-image-url http://$GLANCE_HOSTPORT \
-            image-create \
-            --name "$IMAGE_NAME" --is-public=True \
+        openstack \
+            --os-token $token \
+            --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \
+            image create \
+            "$image_name" --public \
             --container-format=ovf --disk-format=vhd \
-            $FORCE_VM_MODE < "${IMAGE}"
+            $force_vm_mode < "${image}"
         return
     fi
 
@@ -225,93 +214,103 @@
     # and should not be decompressed prior to loading.
     # Setting metadata, so PV mode is used.
     if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then
-        IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}"
-        glance \
-            --os-auth-token $token \
-            --os-image-url http://$GLANCE_HOSTPORT \
-            image-create \
-            --name "$IMAGE_NAME" --is-public=True \
+        image_name="${image_fname%.xen-raw.tgz}"
+        openstack \
+            --os-token $token \
+            --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \
+            image create \
+            "$image_name" --public \
             --container-format=tgz --disk-format=raw \
-            --property vm_mode=xen < "${IMAGE}"
+            --property vm_mode=xen < "${image}"
         return
     fi
 
-    KERNEL=""
-    RAMDISK=""
-    DISK_FORMAT=""
-    CONTAINER_FORMAT=""
-    UNPACK=""
-    case "$IMAGE_FNAME" in
+    local kernel=""
+    local ramdisk=""
+    local disk_format=""
+    local container_format=""
+    local unpack=""
+    local img_property=""
+    case "$image_fname" in
         *.tar.gz|*.tgz)
             # Extract ami and aki files
-            [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] &&
-                IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" ||
-                IMAGE_NAME="${IMAGE_FNAME%.tgz}"
-            xdir="$FILES/images/$IMAGE_NAME"
+            [ "${image_fname%.tar.gz}" != "$image_fname" ] &&
+                image_name="${image_fname%.tar.gz}" ||
+                image_name="${image_fname%.tgz}"
+            local xdir="$FILES/images/$image_name"
             rm -Rf "$xdir";
             mkdir "$xdir"
-            tar -zxf $IMAGE -C "$xdir"
-            KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
+            tar -zxf $image -C "$xdir"
+            kernel=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do
                 [ -f "$f" ] && echo "$f" && break; done; true)
-            RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
+            ramdisk=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do
                 [ -f "$f" ] && echo "$f" && break; done; true)
-            IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
+            image=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do
                 [ -f "$f" ] && echo "$f" && break; done; true)
-            if [[ -z "$IMAGE_NAME" ]]; then
-                IMAGE_NAME=$(basename "$IMAGE" ".img")
+            if [[ -z "$image_name" ]]; then
+                image_name=$(basename "$image" ".img")
             fi
             ;;
         *.img)
-            IMAGE_NAME=$(basename "$IMAGE" ".img")
-            format=$(qemu-img info ${IMAGE} | awk '/^file format/ { print $3; exit }')
+            image_name=$(basename "$image" ".img")
+            local format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
             if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then
-                DISK_FORMAT=$format
+                disk_format=$format
             else
-                DISK_FORMAT=raw
+                disk_format=raw
             fi
-            CONTAINER_FORMAT=bare
+            container_format=bare
             ;;
         *.img.gz)
-            IMAGE_NAME=$(basename "$IMAGE" ".img.gz")
-            DISK_FORMAT=raw
-            CONTAINER_FORMAT=bare
-            UNPACK=zcat
+            image_name=$(basename "$image" ".img.gz")
+            disk_format=raw
+            container_format=bare
+            unpack=zcat
             ;;
         *.qcow2)
-            IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
-            DISK_FORMAT=qcow2
-            CONTAINER_FORMAT=bare
+            image_name=$(basename "$image" ".qcow2")
+            disk_format=qcow2
+            container_format=bare
             ;;
         *.iso)
-            IMAGE_NAME=$(basename "$IMAGE" ".iso")
-            DISK_FORMAT=iso
-            CONTAINER_FORMAT=bare
+            image_name=$(basename "$image" ".iso")
+            disk_format=iso
+            container_format=bare
             ;;
-        *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
+        *.vhd|*.vhdx|*.vhd.gz|*.vhdx.gz)
+            local extension="${image_fname#*.}"
+            image_name=$(basename "$image" ".$extension")
+            disk_format=vhd
+            container_format=bare
+            if [ "${image_fname##*.}" == "gz" ]; then
+                unpack=zcat
+            fi
+            ;;
+        *) echo "Do not know what to do with $image_fname"; false;;
     esac
 
     if is_arch "ppc64"; then
-        IMG_PROPERTY="--property hw_cdrom_bus=scsi"
+        img_property="--property hw_cdrom_bus=scsi"
     fi
 
-    if [ "$CONTAINER_FORMAT" = "bare" ]; then
-        if [ "$UNPACK" = "zcat" ]; then
-            glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+    if [ "$container_format" = "bare" ]; then
+        if [ "$unpack" = "zcat" ]; then
+            openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}")
         else
-            glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" $IMG_PROPERTY --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
+            openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}"
         fi
     else
         # Use glance client to add the kernel the root filesystem.
         # We parse the results of the first upload to get the glance ID of the
         # kernel for use when uploading the root filesystem.
-        KERNEL_ID=""; RAMDISK_ID="";
-        if [ -n "$KERNEL" ]; then
-            KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" $IMG_PROPERTY --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+        local kernel_id="" ramdisk_id="";
+        if [ -n "$kernel" ]; then
+            kernel_id=$(openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
         fi
-        if [ -n "$RAMDISK" ]; then
-            RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" $IMG_PROPERTY --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+        if [ -n "$ramdisk" ]; then
+            ramdisk_id=$(openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
         fi
-        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" $IMG_PROPERTY --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+        openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}"
     fi
 }
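# A minimal usage sketch for the reworked function above; the URL and the
# token are placeholders supplied by the caller, not part of this change:
#
#   upload_image http://example.com/images/cirros-0.3.2-x86_64-disk.img $token
#
# The *.img branch probes the on-disk format with qemu-img and uploads via
# the unified openstack client rather than the legacy glance CLI.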
 
@@ -340,7 +339,7 @@
 function wait_for_service {
     local timeout=$1
     local url=$2
-    timeout $timeout sh -c "while ! curl --noproxy '*' -s $url >/dev/null; do sleep 1; done"
+    timeout $timeout sh -c "while ! curl -k --noproxy '*' -s $url >/dev/null; do sleep 1; done"
 }
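# Example (endpoint assumed): block for up to 60 seconds until keystone
# answers; with -k the probe now also tolerates self-signed TLS endpoints:
#
#   wait_for_service 60 $KEYSTONE_AUTH_URI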
 
 
@@ -546,6 +545,40 @@
     }
 fi
 
+
+# create_disk - Create backing disk
+function create_disk {
+    local node_number
+    local disk_image=${1}
+    local storage_data_dir=${2}
+    local loopback_disk_size=${3}
+
+    # Create a loopback disk and format it to XFS.
+    if [[ -e ${disk_image} ]]; then
+        if egrep -q ${storage_data_dir} /proc/mounts; then
+            sudo umount ${storage_data_dir}/drives/sdb1
+            sudo rm -f ${disk_image}
+        fi
+    fi
+
+    sudo mkdir -p ${storage_data_dir}/drives/images
+
+    sudo truncate -s ${loopback_disk_size} ${disk_image}
+
+    # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in
+    # a single inode. Keeping the default inode size (256) will result in multiple
+    # inodes being used to store xattr. Retrieving the xattr will be slower
+    # since we have to read multiple inodes. This statement is true for both
+    # Swift and Ceph.
+    sudo mkfs.xfs -f -i size=1024 ${disk_image}
+
+    # Mount the disk with mount options to make it as efficient as possible
+    if ! egrep -q ${storage_data_dir} /proc/mounts; then
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
+            ${disk_image} ${storage_data_dir}
+    fi
+}
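# A usage sketch with assumed values: back a 10G XFS loopback store for
# swift under DATA_DIR (the paths are illustrative only):
#
#   create_disk ${DATA_DIR}/swift.img ${DATA_DIR}/swift 10G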
+
 # Restore xtrace
 $XTRACE
 
diff --git a/functions-common b/functions-common
index 9093952..4c61d6a 100644
--- a/functions-common
+++ b/functions-common
@@ -49,6 +49,7 @@
     local section=$2
     local option=$3
     shift 3
+
     local values="$(iniget_multiline $file $section $option) $@"
     iniset_multiline $file $section $option $values
     $xtrace
@@ -62,6 +63,7 @@
     local file=$1
     local section=$2
     local option=$3
+
     sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
     $xtrace
 }
@@ -75,6 +77,7 @@
     local section=$2
     local option=$3
     local line
+
     line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
     echo ${line#*=}
     $xtrace
@@ -89,6 +92,7 @@
     local section=$2
     local option=$3
     local values
+
     values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
     echo ${values}
     $xtrace
@@ -103,6 +107,7 @@
     local section=$2
     local option=$3
     local line
+
     line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
     $xtrace
     [ -n "$line" ]
@@ -145,6 +150,7 @@
     local file=$1
     local section=$2
     local option=$3
+
     shift 3
     local values
     for v in $@; do
@@ -237,28 +243,28 @@
 # die_if_not_set $LINENO env-var "message"
 function die_if_not_set {
     local exitcode=$?
-    FXTRACE=$(set +o | grep xtrace)
+    local xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local line=$1; shift
     local evar=$1; shift
     if ! is_set $evar || [ $exitcode != 0 ]; then
         die $line "$*"
     fi
-    $FXTRACE
+    $xtrace
 }
 
 # Prints line number and "message" in error format
 # err $LINENO "message"
 function err {
     local exitcode=$?
-    errXTRACE=$(set +o | grep xtrace)
+    local xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
     echo $msg 1>&2;
     if [[ -n ${SCREEN_LOGDIR} ]]; then
         echo $msg >> "${SCREEN_LOGDIR}/error.log"
     fi
-    $errXTRACE
+    $xtrace
     return $exitcode
 }
 
@@ -268,14 +274,14 @@
 # err_if_not_set $LINENO env-var "message"
 function err_if_not_set {
     local exitcode=$?
-    errinsXTRACE=$(set +o | grep xtrace)
+    local xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local line=$1; shift
     local evar=$1; shift
     if ! is_set $evar || [ $exitcode != 0 ]; then
         err $line "$*"
     fi
-    $errinsXTRACE
+    $xtrace
     return $exitcode
 }
 
@@ -304,14 +310,14 @@
 # warn $LINENO "message"
 function warn {
     local exitcode=$?
-    errXTRACE=$(set +o | grep xtrace)
+    local xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
     echo $msg 1>&2;
     if [[ -n ${SCREEN_LOGDIR} ]]; then
         echo $msg >> "${SCREEN_LOGDIR}/error.log"
     fi
-    $errXTRACE
+    $xtrace
     return $exitcode
 }
 
@@ -322,13 +328,16 @@
 # Determine OS Vendor, Release and Update
 # Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
 # Returns results in global variables:
-# os_VENDOR - vendor name
-# os_RELEASE - release
-# os_UPDATE - update
-# os_PACKAGE - package type
-# os_CODENAME - vendor's codename for release
+# ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc
+# ``os_RELEASE`` - major release: ``14.04`` (Ubuntu), ``20`` (Fedora)
+# ``os_UPDATE`` - update: ex. the ``5`` in ``RHEL6.5``
+# ``os_PACKAGE`` - package type: ``deb`` or ``rpm``
+# ``os_CODENAME`` - vendor's codename for release: ``snow leopard``, ``trusty``
+declare os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
+
 # GetOSVersion
 function GetOSVersion {
+
     # Figure out which vendor we are
     if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
         # OS/X
@@ -418,6 +427,8 @@
 
 # Translate the OS version values into common nomenclature
 # Sets global ``DISTRO`` from the ``os_*`` values
+declare DISTRO
+
 function GetDistro {
     GetOSVersion
     if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
@@ -452,9 +463,14 @@
 # Utility function for checking machine architecture
 # is_arch arch-type
 function is_arch {
-    ARCH_TYPE=$1
+    [[ "$(uname -m)" == "$1" ]]
+}
 
-    [[ "$(uname -m)" == "$ARCH_TYPE" ]]
+# Quick check for a rackspace host; n.b. rackspace-provided images
+# have these Xen tools installed, but a custom image may not.

+function is_rackspace {
+    [ -f /usr/bin/xenstore-ls ] && \
+        sudo /usr/bin/xenstore-ls vm-data | grep -q "Rackspace"
 }
 
 # Determine if current distribution is a Fedora-based distribution
@@ -500,6 +516,7 @@
 # ``get_release_name_from_branch branch-name``
 function get_release_name_from_branch {
     local branch=$1
+
     if [[ $branch =~ "stable/" ]]; then
         echo ${branch#*/}
     else
@@ -510,72 +527,73 @@
 # git clone only if directory doesn't exist already.  Since ``DEST`` might not
 # be owned by the installation user, we create the directory and change the
 # ownership to the proper user.
-# Set global RECLONE=yes to simulate a clone when dest-dir exists
-# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo
+# Set global ``RECLONE=yes`` to simulate a clone when dest-dir exists
+# Set global ``ERROR_ON_CLONE=True`` to abort execution with an error if the git repo
 # does not exist (default is False, meaning the repo will be cloned).
-# Uses global ``OFFLINE``
+# Uses globals ``ERROR_ON_CLONE``, ``OFFLINE``, ``RECLONE``
 # git_clone remote dest-dir branch
 function git_clone {
-    GIT_REMOTE=$1
-    GIT_DEST=$2
-    GIT_REF=$3
+    local git_remote=$1
+    local git_dest=$2
+    local git_ref=$3
+    local orig_dir=$(pwd)
+
     RECLONE=$(trueorfalse False $RECLONE)
-    local orig_dir=`pwd`
 
     if [[ "$OFFLINE" = "True" ]]; then
         echo "Running in offline mode, clones already exist"
         # print out the results so we know what change was used in the logs
-        cd $GIT_DEST
+        cd $git_dest
         git show --oneline | head -1
         cd $orig_dir
         return
     fi
 
-    if echo $GIT_REF | egrep -q "^refs"; then
+    if echo $git_ref | egrep -q "^refs"; then
         # If our branch name is a gerrit style refs/changes/...
-        if [[ ! -d $GIT_DEST ]]; then
+        if [[ ! -d $git_dest ]]; then
             [[ "$ERROR_ON_CLONE" = "True" ]] && \
                 die $LINENO "Cloning not allowed in this configuration"
-            git_timed clone $GIT_REMOTE $GIT_DEST
+            git_timed clone $git_remote $git_dest
         fi
-        cd $GIT_DEST
-        git_timed fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
+        cd $git_dest
+        git_timed fetch $git_remote $git_ref && git checkout FETCH_HEAD
     else
         # do a full clone only if the directory doesn't exist
-        if [[ ! -d $GIT_DEST ]]; then
+        if [[ ! -d $git_dest ]]; then
             [[ "$ERROR_ON_CLONE" = "True" ]] && \
                 die $LINENO "Cloning not allowed in this configuration"
-            git_timed clone $GIT_REMOTE $GIT_DEST
-            cd $GIT_DEST
+            git_timed clone $git_remote $git_dest
+            cd $git_dest
             # This checkout syntax works for both branches and tags
-            git checkout $GIT_REF
+            git checkout $git_ref
         elif [[ "$RECLONE" = "True" ]]; then
             # if it does exist then simulate what clone does if asked to RECLONE
-            cd $GIT_DEST
+            cd $git_dest
             # set the url to pull from and fetch
-            git remote set-url origin $GIT_REMOTE
+            git remote set-url origin $git_remote
             git_timed fetch origin
             # remove the existing ignored files (like pyc) as they cause breakage
             # (due to the py files having older timestamps than our pyc, so python
             # thinks the pyc files are correct using them)
-            find $GIT_DEST -name '*.pyc' -delete
+            find $git_dest -name '*.pyc' -delete
 
-            # handle GIT_REF accordingly to type (tag, branch)
-            if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then
-                git_update_tag $GIT_REF
-            elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then
-                git_update_branch $GIT_REF
-            elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then
-                git_update_remote_branch $GIT_REF
+            # handle git_ref accordingly to type (tag, branch)
+            if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then
+                git_update_tag $git_ref
+            elif [[ -n "`git show-ref refs/heads/$git_ref`" ]]; then
+                git_update_branch $git_ref
+            elif [[ -n "`git show-ref refs/remotes/origin/$git_ref`" ]]; then
+                git_update_remote_branch $git_ref
             else
-                die $LINENO "$GIT_REF is neither branch nor tag"
+                die $LINENO "$git_ref is neither branch nor tag"
             fi
 
         fi
     fi
 
     # print out the results so we know what change was used in the logs
-    cd $GIT_DEST
+    cd $git_dest
     git show --oneline | head -1
     cd $orig_dir
 }
@@ -614,35 +632,32 @@
 # git update using reference as a branch.
 # git_update_branch ref
 function git_update_branch {
+    local git_branch=$1
 
-    GIT_BRANCH=$1
-
-    git checkout -f origin/$GIT_BRANCH
+    git checkout -f origin/$git_branch
     # a local branch might not exist
-    git branch -D $GIT_BRANCH || true
-    git checkout -b $GIT_BRANCH
+    git branch -D $git_branch || true
+    git checkout -b $git_branch
 }
 
 # git update using reference as a branch.
 # git_update_remote_branch ref
 function git_update_remote_branch {
+    local git_branch=$1
 
-    GIT_BRANCH=$1
-
-    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
+    git checkout -b $git_branch -t origin/$git_branch
 }
 
 # git update using reference as a tag. Be careful editing source at that repo
 # as working copy will be in a detached mode
 # git_update_tag ref
 function git_update_tag {
+    local git_tag=$1
 
-    GIT_TAG=$1
-
-    git tag -d $GIT_TAG
+    git tag -d $git_tag
     # fetching given tag only
-    git_timed fetch origin tag $GIT_TAG
-    git checkout -f $GIT_TAG
+    git_timed fetch origin tag $git_tag
+    git checkout -f $git_tag
 }
 
 
@@ -662,16 +677,17 @@
     # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
     if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
         host_ip=""
-        host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/");  print parts[1]}'`
-        for IP in $host_ips; do
+        local host_ips=$(LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/");  print parts[1]}')
+        local ip
+        for ip in $host_ips; do
             # Attempt to filter out IP addresses that are part of the fixed and
             # floating range. Note that this method only works if the ``netaddr``
             # python library is installed. If it is not installed, an error
             # will be printed and the first IP from the interface will be used.
             # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
             # address.
-            if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then
-                host_ip=$IP
+            if ! (address_in_net $ip $fixed_range || address_in_net $ip $floating_range); then
+                host_ip=$ip
                 break;
             fi
         done
@@ -679,11 +695,19 @@
     echo $host_ip
 }
 
+# Generates a hex string from ``size`` bytes of pseudo-random data
+# generate_hex_string size
+function generate_hex_string {
+    local size=$1
+    hexdump -n "$size" -v -e '/1 "%02x"' /dev/urandom
+}
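# For example, a 10-byte read yields a 20-character hex string, handy for
# ad-hoc secrets (the variable name here is illustrative only):
#
#   swift_hash=$(generate_hex_string 10)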
+
 # Grab a numbered field from python prettytable output
 # Fields are numbered starting with 1
 # Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
 # get_field field-number
 function get_field {
+    local data field
     while read data; do
         if [ "$1" -lt 0 ]; then
             field="(\$(NF$1))"
@@ -723,10 +747,15 @@
 }
 
 # Gets or creates user
-# Usage: get_or_create_user <username> <password> <project> <email>
+# Usage: get_or_create_user <username> <password> <project> [<email>]
 function get_or_create_user {
+    if [[ ! -z "$4" ]]; then
+        local email="--email=$4"
+    else
+        local email=""
+    fi
     # Gets user id
-    USER_ID=$(
+    local user_id=$(
         # Gets user id
         openstack user show $1 -f value -c id 2>/dev/null ||
         # Creates new user
@@ -734,63 +763,63 @@
             $1 \
             --password "$2" \
             --project $3 \
-            --email $4 \
+            $email \
             -f value -c id
     )
-    echo $USER_ID
+    echo $user_id
 }
 
 # Gets or creates project
 # Usage: get_or_create_project <name>
 function get_or_create_project {
     # Gets project id
-    PROJECT_ID=$(
+    local project_id=$(
         # Gets project id
         openstack project show $1 -f value -c id 2>/dev/null ||
         # Creates new project if not exists
         openstack project create $1 -f value -c id
     )
-    echo $PROJECT_ID
+    echo $project_id
 }
 
 # Gets or creates role
 # Usage: get_or_create_role <name>
 function get_or_create_role {
-    ROLE_ID=$(
+    local role_id=$(
         # Gets role id
         openstack role show $1 -f value -c id 2>/dev/null ||
         # Creates role if not exists
         openstack role create $1 -f value -c id
     )
-    echo $ROLE_ID
+    echo $role_id
 }
 
 # Gets or adds user role
 # Usage: get_or_add_user_role <role> <user> <project>
 function get_or_add_user_role {
     # Gets user role id
-    USER_ROLE_ID=$(openstack user role list \
+    local user_role_id=$(openstack user role list \
         $2 \
         --project $3 \
         --column "ID" \
         --column "Name" \
         | grep " $1 " | get_field 1)
-    if [[ -z "$USER_ROLE_ID" ]]; then
+    if [[ -z "$user_role_id" ]]; then
         # Adds role to user
-        USER_ROLE_ID=$(openstack role add \
+        user_role_id=$(openstack role add \
             $1 \
             --user $2 \
             --project $3 \
             | grep " id " | get_field 2)
     fi
-    echo $USER_ROLE_ID
+    echo $user_role_id
 }
 
 # Gets or creates service
 # Usage: get_or_create_service <name> <type> <description>
 function get_or_create_service {
     # Gets service id
-    SERVICE_ID=$(
+    local service_id=$(
         # Gets service id
         openstack service show $1 -f value -c id 2>/dev/null ||
         # Creates new service if not exists
@@ -800,22 +829,22 @@
             --description="$3" \
             -f value -c id
     )
-    echo $SERVICE_ID
+    echo $service_id
 }
 
 # Gets or creates endpoint
 # Usage: get_or_create_endpoint <service> <region> <publicurl> <adminurl> <internalurl>
 function get_or_create_endpoint {
     # Gets endpoint id
-    ENDPOINT_ID=$(openstack endpoint list \
+    local endpoint_id=$(openstack endpoint list \
         --column "ID" \
         --column "Region" \
         --column "Service Name" \
         | grep " $2 " \
         | grep " $1 " | get_field 1)
-    if [[ -z "$ENDPOINT_ID" ]]; then
+    if [[ -z "$endpoint_id" ]]; then
         # Creates new endpoint
-        ENDPOINT_ID=$(openstack endpoint create \
+        endpoint_id=$(openstack endpoint create \
             $1 \
             --region $2 \
             --publicurl $3 \
@@ -823,9 +852,10 @@
             --internalurl $5 \
             | grep " id " | get_field 2)
     fi
-    echo $ENDPOINT_ID
+    echo $endpoint_id
 }
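# A sketch of how these idempotent helpers compose (names and URLs are
# illustrative, not from this change):
#
#   project=$(get_or_create_project demo)
#   user=$(get_or_create_user demo $ADMIN_PASSWORD $project demo@example.com)
#   get_or_add_user_role Member $user $project
#   service=$(get_or_create_service glance image "Glance Image Service")
#   get_or_create_endpoint image RegionOne \
#       http://$SERVICE_HOST:9292 http://$SERVICE_HOST:9292 http://$SERVICE_HOST:9292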
 
+
 # Package Functions
 # =================
 
@@ -986,9 +1016,10 @@
 }
 
 # Distro-agnostic package installer
+# Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE``
 # install_package package [package ...]
 function update_package_repo {
-    if [[ "NO_UPDATE_REPOS" = "True" ]]; then
+    if [[ "$NO_UPDATE_REPOS" = "True" ]]; then
         return 0
     fi
 
@@ -1086,6 +1117,7 @@
 }
 
 # zypper wrapper to set arguments correctly
+# Uses globals ``OFFLINE``, ``*_proxy``
 # zypper_install package [package ...]
 function zypper_install {
     [[ "$OFFLINE" = "True" ]] && return
@@ -1102,11 +1134,15 @@
 # _run_process() is designed to be backgrounded by run_process() to simulate a
 # fork.  It includes the dirty work of closing extra filehandles and preparing log
 # files to produce the same logs as screen_it().  The log filename is derived
-# from the service name and global-and-now-misnamed SCREEN_LOGDIR
-# _run_process service "command-line"
+# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
+# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
+# If an optional group is provided, sg will be used to set the group of
+# the command.
+# _run_process service "command-line" [group]
 function _run_process {
     local service=$1
     local command="$2"
+    local group=$3
 
     # Undo logging redirections and close the extra descriptors
     exec 1>&3
@@ -1115,19 +1151,28 @@
     exec 6>&-
 
     if [[ -n ${SCREEN_LOGDIR} ]]; then
-        exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
-        ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+        exec 1>&${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log 2>&1
+        ln -sf ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${service}.log
 
         # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
         export PYTHONUNBUFFERED=1
     fi
 
-    exec /bin/bash -c "$command"
-    die "$service exec failure: $command"
+    # Run under ``setsid`` to force the process to become a session and group leader.
+    # The pid saved can be used with pkill -g to get the entire process group.
+    if [[ -n "$group" ]]; then
+        setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
+    else
+        setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
+    fi
+
+    # Just silently exit this process
+    exit 0
 }
 
 # Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
 # This is used for ``service_check`` when all the ``screen_it`` are called finished
+# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
 # init_service_check
 function init_service_check {
     SCREEN_NAME=${SCREEN_NAME:-stack}
@@ -1145,68 +1190,81 @@
 function is_running {
     local name=$1
     ps auxw | grep -v grep | grep ${name} > /dev/null
-    RC=$?
+    local exitcode=$?
     # some times I really hate bash reverse binary logic
-    return $RC
+    return $exitcode
 }
 
-# run_process() launches a child process that closes all file descriptors and
-# then exec's the passed in command.  This is meant to duplicate the semantics
-# of screen_it() without screen.  PIDs are written to
-# $SERVICE_DIR/$SCREEN_NAME/$service.pid
-# run_process service "command-line"
+# Run a single service under screen or directly
+# If the command includes shell metacharacters (;<>*) it must be run using a shell.
+# If an optional group is provided, sg will be used to run the
+# command as that group.
+# run_process service "command-line" [group]
 function run_process {
     local service=$1
     local command="$2"
+    local group=$3
 
-    # Spawn the child process
-    _run_process "$service" "$command" &
-    echo $!
-}
-
-# Helper to launch a service in a named screen
-# screen_it service "command-line"
-function screen_it {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
-
-    if is_service_enabled $1; then
-        # Append the service to the screen rc file
-        screen_rc "$1" "$2"
-
+    if is_service_enabled $service; then
         if [[ "$USE_SCREEN" = "True" ]]; then
-            screen -S $SCREEN_NAME -X screen -t $1
-
-            if [[ -n ${SCREEN_LOGDIR} ]]; then
-                screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
-                screen -S $SCREEN_NAME -p $1 -X log on
-                ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
-            fi
-
-            # sleep to allow bash to be ready to be send the command - we are
-            # creating a new window in screen and then sends characters, so if
-            # bash isn't running by the time we send the command, nothing happens
-            sleep 3
-
-            NL=`echo -ne '\015'`
-            # This fun command does the following:
-            # - the passed server command is backgrounded
-            # - the pid of the background process is saved in the usual place
-            # - the server process is brought back to the foreground
-            # - if the server process exits prematurely the fg command errors
-            #   and a message is written to stdout and the service failure file
-            # The pid saved can be used in screen_stop() as a process group
-            # id to kill off all child processes
-            screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
+            screen_service "$service" "$command" "$group"
         else
             # Spawn directly without screen
-            run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
+            _run_process "$service" "$command" "$group" &
         fi
     fi
 }
 
+# Helper to launch a service in a named screen
+# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_NAME``, ``SCREEN_LOGDIR``,
+# ``SERVICE_DIR``, ``USE_SCREEN``
+# screen_service service "command-line" [group]
+# Run a command in a shell in a screen window; if an optional group
+# is provided, sg is used to set the group of the command.
+function screen_service {
+    local service=$1
+    local command="$2"
+    local group=$3
+
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+
+    if is_service_enabled $service; then
+        # Append the service to the screen rc file
+        screen_rc "$service" "$command"
+
+        screen -S $SCREEN_NAME -X screen -t $service
+
+        if [[ -n ${SCREEN_LOGDIR} ]]; then
+            screen -S $SCREEN_NAME -p $service -X logfile ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log
+            screen -S $SCREEN_NAME -p $service -X log on
+            ln -sf ${SCREEN_LOGDIR}/screen-${service}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${service}.log
+        fi
+
+        # sleep to allow bash to be ready to receive the command - we are
+        # creating a new window in screen and then sending characters, so if
+        # bash isn't running by the time we send the command, nothing happens
+        sleep 3
+
+        NL=`echo -ne '\015'`
+        # This fun command does the following:
+        # - the passed server command is backgrounded
+        # - the pid of the background process is saved in the usual place
+        # - the server process is brought back to the foreground
+        # - if the server process exits prematurely the fg command errors
+        #   and a message is written to stdout and the service failure file
+        # The pid saved can be used in stop_process() as a process group
+        # id to kill off all child processes
+        if [[ -n "$group" ]]; then
+            command="sg $group '$command'"
+        fi
+        screen -S $SCREEN_NAME -p $service -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${service}.pid; fg || echo \"$service failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${service}.failure\"$NL"
+    fi
+}
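# Usage sketch (service name, binary path, and group variable assumed): the
# optional third argument runs the command under sg in the given group,
# whether it lands in a screen window or a plain background process:
#
#   run_process n-api "$NOVA_BIN_DIR/nova-api"
#   run_process n-cpu "$NOVA_BIN_DIR/nova-compute" $LIBVIRT_GROUP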
+
 # Screen rc file builder
+# Uses globals ``SCREEN_NAME``, ``SCREENRC``
 # screen_rc service "command-line"
 function screen_rc {
     SCREEN_NAME=${SCREEN_NAME:-stack}
@@ -1237,26 +1295,48 @@
 # If a PID is available use it, kill the whole process group via TERM
 # If screen is being used kill the screen window; this will catch processes
 # that did not leave a PID behind
-# screen_stop service
-function screen_stop {
+# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``
+# screen_stop_service service
+function screen_stop_service {
+    local service=$1
+
     SCREEN_NAME=${SCREEN_NAME:-stack}
     SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
     USE_SCREEN=$(trueorfalse True $USE_SCREEN)
 
-    if is_service_enabled $1; then
+    if is_service_enabled $service; then
+        # Clean up the screen window
+        screen -S $SCREEN_NAME -p $service -X kill
+    fi
+}
+
+# Stop a service process
+# If a PID is available use it, kill the whole process group via TERM
+# If screen is being used kill the screen window; this will catch processes
+# that did not leave a PID behind
+# Uses globals ``SERVICE_DIR``, ``USE_SCREEN``
+# stop_process service
+function stop_process {
+    local service=$1
+
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+
+    if is_service_enabled $service; then
         # Kill via pid if we have one available
-        if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then
-            pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid)
-            rm $SERVICE_DIR/$SCREEN_NAME/$1.pid
+        if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then
+            pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid)
+            rm $SERVICE_DIR/$SCREEN_NAME/$service.pid
         fi
         if [[ "$USE_SCREEN" = "True" ]]; then
             # Clean up the screen window
-            screen -S $SCREEN_NAME -p $1 -X kill
+            screen_stop_service $service
         fi
     fi
 }
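# Teardown mirrors startup (service name assumed); pkill -g treats the saved
# pid as a process-group id, so children spawned by the service die too:
#
#   stop_process n-cpu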
 
 # Helper to get the status of each running service
+# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
 # service_check
 function service_check {
     local service
@@ -1285,6 +1365,91 @@
     fi
 }
 
+# Tail a log file in a screen if USE_SCREEN is true.
+function tail_log {
+    local service=$1
+    local logfile=$2
+
+    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
+    if [[ "$USE_SCREEN" = "True" ]]; then
+        screen_service "$service" "sudo tail -f $logfile"
+    fi
+}
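# For example (paths assumed), give the horizon Apache error log its own
# screen window when screen is in use:
#
#   tail_log horizon /var/log/$APACHE_NAME/horizon_error.log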
+
+
+# Deprecated Functions
+# --------------------
+
+# _old_run_process() is designed to be backgrounded by old_run_process() to simulate a
+# fork.  It includes the dirty work of closing extra filehandles and preparing log
+# files to produce the same logs as screen_it().  The log filename is derived
+# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
+# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
+# _old_run_process service "command-line"
+function _old_run_process {
+    local service=$1
+    local command="$2"
+
+    # Undo logging redirections and close the extra descriptors
+    exec 1>&3
+    exec 2>&3
+    exec 3>&-
+    exec 6>&-
+
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
+        ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+
+        # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
+        export PYTHONUNBUFFERED=1
+    fi
+
+    exec /bin/bash -c "$command"
+    die "$service exec failure: $command"
+}
+
+# old_run_process() launches a child process that closes all file descriptors and
+# then exec's the passed in command.  This is meant to duplicate the semantics
+# of screen_it() without screen.  PIDs are written to
+# ``$SERVICE_DIR/$SCREEN_NAME/$service.pid`` by the spawned child process.
+# old_run_process service "command-line"
+function old_run_process {
+    local service=$1
+    local command="$2"
+
+    # Spawn the child process
+    _old_run_process "$service" "$command" &
+    echo $!
+}
+
+# Compatibility for existing start_XXXX() functions
+# Uses global ``USE_SCREEN``
+# screen_it service "command-line"
+function screen_it {
+    if is_service_enabled $1; then
+        # Append the service to the screen rc file
+        screen_rc "$1" "$2"
+
+        if [[ "$USE_SCREEN" = "True" ]]; then
+            screen_service "$1" "$2"
+        else
+            # Spawn directly without screen
+            old_run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
+        fi
+    fi
+}
+
+# Compatibility for existing stop_XXXX() functions
+# Stop a service in screen
+# If a PID is available use it, kill the whole process group via TERM
+# If screen is being used kill the screen window; this will catch processes
+# that did not leave a PID behind
+# screen_stop service
+function screen_stop {
+    # Clean up the screen window
+    stop_process $1
+}
+
 
 # Python Functions
 # ================
@@ -1324,20 +1489,24 @@
     if [[ -z "$os_PACKAGE" ]]; then
         GetOSVersion
     fi
-    if [[ $TRACK_DEPENDS = True ]]; then
+    if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then
+        # TRACK_DEPENDS=True installation creates a circular dependency when
+        # we attempt to install virtualenv into a virtualenv, so we must
+        # install it globally.
         source $DEST/.venv/bin/activate
-        CMD_PIP=$DEST/.venv/bin/pip
-        SUDO_PIP="env"
+        local cmd_pip=$DEST/.venv/bin/pip
+        local sudo_pip="env"
     else
-        SUDO_PIP="sudo"
-        CMD_PIP=$(get_pip_command)
+        local cmd_pip=$(get_pip_command)
+        local sudo_pip="sudo"
     fi
 
     # Mirror option not needed anymore because pypi has CDN available,
     # but it's useful in certain circumstances
     PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False}
+    local pip_mirror_opt=""
     if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
-        PIP_MIRROR_OPT="--use-mirrors"
+        pip_mirror_opt="--use-mirrors"
     fi
 
     # pip < 1.4 has a bug where it will use an already existing build
@@ -1350,13 +1519,24 @@
     local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX)
 
     $xtrace
-    $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
+    $sudo_pip PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
         http_proxy=$http_proxy \
         https_proxy=$https_proxy \
         no_proxy=$no_proxy \
-        $CMD_PIP install --build=${pip_build_tmp} \
-        $PIP_MIRROR_OPT $@ \
-        && $SUDO_PIP rm -rf ${pip_build_tmp}
+        $cmd_pip install \
+        $pip_mirror_opt $@
+
+    if [[ "$INSTALL_TESTONLY_PACKAGES" == "True" ]]; then
+        local test_req="$@/test-requirements.txt"
+        if [[ -e "$test_req" ]]; then
+            $sudo_pip PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
+                http_proxy=$http_proxy \
+                https_proxy=$https_proxy \
+                no_proxy=$no_proxy \
+                $cmd_pip install \
+                $pip_mirror_opt -r $test_req
+        fi
+    fi
 }
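# With the hunk above, pulling in test-only packages is a localrc switch
# (values assumed): when a project directory is passed, its
# test-requirements.txt is installed alongside the runtime requirements:
#
#   INSTALL_TESTONLY_PACKAGES=True
#   pip_install $DEST/nova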
 
 # this should be used if you want to install globally, all libraries should
@@ -1391,7 +1571,7 @@
 
     if [[ $update_requirements != "changed" ]]; then
         (cd $REQUIREMENTS_DIR; \
-            $SUDO_CMD python update.py $project_dir)
+            python update.py $project_dir)
     fi
 
     setup_package $project_dir $flags
@@ -1498,6 +1678,7 @@
 # enable_service service [service ...]
 function enable_service {
     local tmpsvcs="${ENABLED_SERVICES}"
+    local service
     for service in $@; do
         if ! is_service_enabled $service; then
             tmpsvcs+=",$service"
@@ -1534,7 +1715,8 @@
     local xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local enabled=1
-    services=$@
+    local services=$@
+    local service
     for service in ${services}; do
         [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0
 
@@ -1549,6 +1731,7 @@
         #                are implemented
 
         [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
+        [[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0
         [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
         [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
         [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
@@ -1558,6 +1741,7 @@
         [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0
         [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
         [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
+        [[ ${service} == key-* && ${ENABLED_SERVICES} =~ "key" ]] && enabled=0
     done
     $xtrace
     return $enabled
@@ -1570,8 +1754,9 @@
 function use_exclusive_service {
     local options=${!1}
     local selection=$3
-    out=$2
+    local out=$2
     [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1
+    local opt
     for opt in $options;do
         [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt
     done
@@ -1595,7 +1780,7 @@
 
     let last="${#args[*]} - 1"
 
-    dir_to_check=${args[$last]}
+    local dir_to_check=${args[$last]}
     if [ ! -d "$dir_to_check" ]; then
         dir_to_check=`dirname "$dir_to_check"`
     fi
diff --git a/lib/apache b/lib/apache
index f4f82a1..2c43681 100644
--- a/lib/apache
+++ b/lib/apache
@@ -59,6 +59,33 @@
     else
         exit_distro_not_supported "apache installation"
     fi
+
+    # Ensure mod_version is enabled for <IfVersion ...>.  It is built in
+    # statically on anything recent, but Precise (Apache 2.2) does not
+    # have it enabled.
+    sudo a2enmod version || true
+}
+
+# get_apache_version() - return the version of Apache installed
+# Apache 2.2 and 2.4 differ in various ways that warrant special
+# handling, so callers branch on the value returned here.
+function get_apache_version {
+    if is_ubuntu; then
+        local version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
+    elif is_fedora; then
+        local version_str=$(rpm -qa --queryformat '%{VERSION}' httpd)
+    elif is_suse; then
+        local version_str=$(rpm -qa --queryformat '%{VERSION}' apache2)
+    else
+        exit_distro_not_supported "cannot determine apache version"
+    fi
+    if [[ "$version_str" =~ ^2\.2\. ]]; then
+        echo "2.2"
+    elif [[ "$version_str" =~ ^2\.4\. ]]; then
+        echo "2.4"
+    else
+        exit_distro_not_supported "apache version not supported"
+    fi
 }
 
 # apache_site_config_for() - The filename of the site's configuration file.
@@ -87,8 +114,8 @@
 function apache_site_config_for {
     local site=$@
     if is_ubuntu; then
-        local apache_version=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
-        if [[ "$apache_version" =~ ^2\.2\. ]]; then
+        local apache_version=$(get_apache_version)
+        if [[ "$apache_version" == "2.2" ]]; then
             # Ubuntu 12.04 - Apache 2.2
             echo $APACHE_CONF_DIR/${site}
         else
diff --git a/lib/baremetal b/lib/baremetal
index adcbe4c..af90c06 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -127,10 +127,6 @@
 BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH}
 
 
-# Below this, we set some path and filenames.
-# Defaults are probably sufficient.
-DIB_DIR=${DIB_DIR:-$DEST/diskimage-builder}
-
 # Use DIB to create deploy ramdisk and kernel.
 BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK`
 # If not use DIB, these files are used as deploy ramdisk/kernel.
@@ -165,8 +161,9 @@
 # Install diskimage-builder and shell-in-a-box
 # so that we can build the deployment kernel & ramdisk
 function prepare_baremetal_toolchain {
-    git_clone $DIB_REPO $DIB_DIR $DIB_BUILD_BRANCH
-
+    if [[ $(type -P ramdisk-image-create) == "" ]]; then
+        pip_install diskimage_builder
+    fi
     local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX)
     if [[ ! -e $DEST/$shellinabox_basename ]]; then
         cd $DEST
@@ -223,25 +220,27 @@
         BM_DEPLOY_KERNEL=bm-deploy.kernel
         BM_DEPLOY_RAMDISK=bm-deploy.initramfs
         if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! -e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then
-            $DIB_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR \
+            ramdisk-image-create $BM_DEPLOY_FLAVOR \
                 -o $TOP_DIR/files/bm-deploy
         fi
     fi
 
     # load them into glance
-    BM_DEPLOY_KERNEL_ID=$(glance \
-        --os-auth-token $token \
-        --os-image-url http://$GLANCE_HOSTPORT \
-        image-create \
-        --name $BM_DEPLOY_KERNEL \
-        --is-public True --disk-format=aki \
+    BM_DEPLOY_KERNEL_ID=$(openstack \
+        --os-token $token \
+        --os-url http://$GLANCE_HOSTPORT \
+        image create \
+        $BM_DEPLOY_KERNEL \
+        --public --disk-format=aki \
+        --container-format=aki \
         < $TOP_DIR/files/$BM_DEPLOY_KERNEL  | grep ' id ' | get_field 2)
-    BM_DEPLOY_RAMDISK_ID=$(glance \
-        --os-auth-token $token \
-        --os-image-url http://$GLANCE_HOSTPORT \
-        image-create \
-        --name $BM_DEPLOY_RAMDISK \
-        --is-public True --disk-format=ari \
+    BM_DEPLOY_RAMDISK_ID=$(openstack \
+        --os-token $token \
+        --os-url http://$GLANCE_HOSTPORT \
+        image create \
+        $BM_DEPLOY_RAMDISK \
+        --public --disk-format=ari \
+        --container-format=ari \
         < $TOP_DIR/files/$BM_DEPLOY_RAMDISK  | grep ' id ' | get_field 2)
 }
 
@@ -271,7 +270,7 @@
     image_name=$(basename "$file" ".qcow2")
 
     # this call returns the file names as "$kernel,$ramdisk"
-    out=$($DIB_DIR/bin/disk-image-get-kernel \
+    out=$(disk-image-get-kernel \
             -x -d $TOP_DIR/files -o bm-deploy -i $file)
     if [ $? -ne 0 ]; then
         die $LINENO "Failed to get kernel and ramdisk from $file"
@@ -284,19 +283,21 @@
     OUT_RAMDISK=${out##*,}
 
     # load them into glance
-    KERNEL_ID=$(glance \
-        --os-auth-token $token \
-        --os-image-url http://$GLANCE_HOSTPORT \
-        image-create \
-        --name $image_name-kernel \
-        --is-public True --disk-format=aki \
+    KERNEL_ID=$(openstack \
+        --os-token $token \
+        --os-url http://$GLANCE_HOSTPORT \
+        image create \
+        $image_name-kernel \
+        --public --disk-format=aki \
+        --container-format=aki \
         < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
-    RAMDISK_ID=$(glance \
-        --os-auth-token $token \
-        --os-image-url http://$GLANCE_HOSTPORT \
-        image-create \
-        --name $image_name-initrd \
-        --is-public True --disk-format=ari \
+    RAMDISK_ID=$(openstack \
+        --os-token $token \
+        --os-url http://$GLANCE_HOSTPORT \
+        image create \
+        $image_name-initrd \
+        --public --disk-format=ari \
+        --container-format=ari \
         < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
 }
 
@@ -362,18 +363,18 @@
     if [ "$CONTAINER_FORMAT" = "bare" ]; then
         extract_and_upload_k_and_r_from_image $token $IMAGE
     elif [ "$CONTAINER_FORMAT" = "ami" ]; then
-        KERNEL_ID=$(glance \
-            --os-auth-token $token \
-            --os-image-url http://$GLANCE_HOSTPORT \
-            image-create \
-            --name "$IMAGE_NAME-kernel" --is-public True \
+        KERNEL_ID=$(openstack \
+            --os-token $token \
+            --os-url http://$GLANCE_HOSTPORT \
+            image create \
+            "$IMAGE_NAME-kernel" --public \
             --container-format aki \
             --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
-        RAMDISK_ID=$(glance \
-            --os-auth-token $token \
-            --os-image-url http://$GLANCE_HOSTPORT \
-            image-create \
-            --name "$IMAGE_NAME-ramdisk" --is-public True \
+        RAMDISK_ID=$(openstack \
+            --os-token $token \
+            --os-url http://$GLANCE_HOSTPORT \
+            image create \
+            "$IMAGE_NAME-ramdisk" --public \
             --container-format ari \
             --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
     else
@@ -381,11 +382,11 @@
         return
     fi
 
-    glance \
-        --os-auth-token $token \
-        --os-image-url http://$GLANCE_HOSTPORT \
-        image-create \
-        --name "${IMAGE_NAME%.img}" --is-public True \
+    openstack \
+        --os-token $token \
+        --os-url http://$GLANCE_HOSTPORT \
+        image create \
+        "${IMAGE_NAME%.img}" --public \
         --container-format $CONTAINER_FORMAT \
         --disk-format $DISK_FORMAT \
         ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
diff --git a/lib/ceilometer b/lib/ceilometer
index 1540e3e..9bb3121 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -41,6 +41,7 @@
 CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
 CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api
 CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}
+CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer}
 
 # Support potential entry-points console scripts
 CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
@@ -52,6 +53,7 @@
 CEILOMETER_SERVICE_PROTOCOL=http
 CEILOMETER_SERVICE_HOST=$SERVICE_HOST
 CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777}
+CEILOMETER_USE_MOD_WSGI=$(trueorfalse False $CEILOMETER_USE_MOD_WSGI)
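+
+# For example, serving the Ceilometer API through Apache mod_wsgi can be
+# requested from local.conf (a minimal sketch):
+#
+#   CEILOMETER_USE_MOD_WSGI=True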
 
 # To enable OSprofiler change value of this variable to "notifications,profiler"
 CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications}
@@ -79,19 +81,19 @@
 
 create_ceilometer_accounts() {
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Ceilometer
     if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
-        CEILOMETER_USER=$(get_or_create_user "ceilometer" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT "ceilometer@example.com")
-        get_or_add_user_role $ADMIN_ROLE $CEILOMETER_USER $SERVICE_TENANT
+        local ceilometer_user=$(get_or_create_user "ceilometer" \
+            "$SERVICE_PASSWORD" $service_tenant)
+        get_or_add_user_role $admin_role $ceilometer_user $service_tenant
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            CEILOMETER_SERVICE=$(get_or_create_service "ceilometer" \
+            local ceilometer_service=$(get_or_create_service "ceilometer" \
                 "metering" "OpenStack Telemetry Service")
-            get_or_create_endpoint $CEILOMETER_SERVICE \
+            get_or_create_endpoint $ceilometer_service \
                 "$REGION_NAME" \
                 "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
                 "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
@@ -105,24 +107,43 @@
 }
 
 
+# _cleanup_ceilometer_apache_wsgi() - Remove wsgi files and the apache vhost file
+function _cleanup_ceilometer_apache_wsgi {
+    sudo rm -f $CEILOMETER_WSGI_DIR/*
+    sudo rm -f $(apache_site_config_for ceilometer)
+}
+
 # cleanup_ceilometer() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_ceilometer {
     if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
         mongo ceilometer --eval "db.dropDatabase();"
     fi
+    if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
+        _cleanup_ceilometer_apache_wsgi
+    fi
 }
 
-# configure_ceilometerclient() - Set config files, create data dirs, etc
-function configure_ceilometerclient {
-    setup_develop $CEILOMETERCLIENT_DIR
-    sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion
+function _config_ceilometer_apache_wsgi {
+    sudo mkdir -p $CEILOMETER_WSGI_DIR
+
+    local ceilometer_apache_conf=$(apache_site_config_for ceilometer)
+    local apache_version=$(get_apache_version)
+
+    # copy proxy vhost and wsgi file
+    sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app
+
+    sudo cp $FILES/apache-ceilometer.template $ceilometer_apache_conf
+    sudo sed -e "
+        s|%PORT%|$CEILOMETER_SERVICE_PORT|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g;
+        s|%USER%|$STACK_USER|g
+    " -i $ceilometer_apache_conf
 }
 
 # configure_ceilometer() - Set config files, create data dirs, etc
 function configure_ceilometer {
-    setup_develop $CEILOMETER_DIR
-
     [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR
     sudo chown $STACK_USER $CEILOMETER_CONF_DIR
 
@@ -154,14 +175,11 @@
     iniset $CEILOMETER_CONF service_credentials os_password $SERVICE_PASSWORD
     iniset $CEILOMETER_CONF service_credentials os_tenant_name $SERVICE_TENANT_NAME
 
-    iniset $CEILOMETER_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-    iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer
-    iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR
+    configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR
 
     if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
         iniset $CEILOMETER_CONF database connection `database_connection_url ceilometer`
+        iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS
     else
         iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer
         configure_mongodb
@@ -174,6 +192,11 @@
         iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER"
         iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD"
     fi
+
+    if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
+        iniset $CEILOMETER_CONF api pecan_debug "False"
+        _config_ceilometer_apache_wsgi
+    fi
 }
 
 function configure_mongodb {
@@ -218,25 +241,41 @@
 # install_ceilometer() - Collect source and prepare
 function install_ceilometer {
     git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH
+    setup_develop $CEILOMETER_DIR
 }
 
 # install_ceilometerclient() - Collect source and prepare
 function install_ceilometerclient {
     git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH
+    setup_develop $CEILOMETERCLIENT_DIR
+    sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion
 }
 
 # start_ceilometer() - Start running processes, including screen
 function start_ceilometer {
+    run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
+    run_process ceilometer-anotification "ceilometer-agent-notification --config-file $CEILOMETER_CONF"
+    run_process ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
+
+    if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then
+        run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+    else
+        enable_apache_site ceilometer
+        restart_apache_server
+        tail_log ceilometer /var/log/$APACHE_NAME/ceilometer.log
+        tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log
+    fi
+
+    # Start the compute agent last to allow time for the collector to
+    # fully wake up and connect to the message bus. See bug #1355809
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
-        screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP 'ceilometer-agent-compute --config-file $CEILOMETER_CONF'"
+        run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP
     fi
     if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
-        screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF"
+        run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF"
     fi
-    screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
-    screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF"
-    screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
-    screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
 
     # only die on API if it was actually intended to be turned on
     if is_service_enabled ceilometer-api; then
@@ -246,15 +285,19 @@
         fi
     fi
 
-    screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
-    screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
+    run_process ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
+    run_process ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
 }
 
 # stop_ceilometer() - Stop running processes
 function stop_ceilometer {
+    if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
+        disable_apache_site ceilometer
+        restart_apache_server
+    fi
     # Kill the ceilometer screen windows
     for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
-        screen_stop $serv
+        stop_process $serv
     done
 }
 
diff --git a/lib/ceph b/lib/ceph
new file mode 100644
index 0000000..30ca903
--- /dev/null
+++ b/lib/ceph
@@ -0,0 +1,287 @@
+# lib/ceph
+# Functions to control the configuration and operation of the **Ceph** storage service
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
+
+# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
+#
+# - install_ceph
+# - configure_ceph
+# - init_ceph
+# - start_ceph
+# - stop_ceph
+# - cleanup_ceph
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
+# Default is ``/var/lib/ceph``.
+CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
+CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img
+
+# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
+# Default is ``/etc/ceph``.
+CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
+
+# DevStack will create a loop-back disk formatted as XFS to store the
+# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size,
+# with an optional unit suffix (e.g. ``4G``).
+# Default is 4 gigabytes.
+CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
+CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
+
+# Common
+CEPH_FSID=$(uuidgen)
+CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf
+
+# Glance
+GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
+GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
+GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
+GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
+
+# Nova
+NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
+NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
+NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}
+
+# Cinder
+CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
+CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
+CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
+CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
+CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
+
+# Set ``CEPH_REPLICAS`` to the number of replicas to configure for your
+# Ceph cluster. By default we configure only one replica, since that is
+# far less CPU- and memory-intensive. If you plan to test Ceph
+# replication, feel free to increase this value.
+CEPH_REPLICAS=${CEPH_REPLICAS:-1}
+CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
+
+# Functions
+# ------------
+
+# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
+# so it can connect to the Ceph cluster while attaching a Cinder block device
+function import_libvirt_secret_ceph {
+    cat > secret.xml <<EOF
+<secret ephemeral='no' private='no'>
+   <uuid>${CINDER_CEPH_UUID}</uuid>
+   <usage type='ceph'>
+     <name>client.${CINDER_CEPH_USER} secret</name>
+   </usage>
+</secret>
+EOF
+    sudo virsh secret-define --file secret.xml
+    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
+    sudo rm -f secret.xml
+}
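+
+# An optional sanity check: confirm libvirt stored the key
+#
+#   sudo virsh secret-get-value ${CINDER_CEPH_UUID}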
+
+# cleanup_ceph() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ceph {
+    sudo pkill -f ceph-mon
+    sudo pkill -f ceph-osd
+    sudo rm -rf ${CEPH_DATA_DIR}/*/*
+    sudo rm -rf ${CEPH_CONF_DIR}/*
+    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
+        sudo umount ${CEPH_DATA_DIR}
+    fi
+    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
+        sudo rm -f ${CEPH_DISK_IMAGE}
+    fi
+    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
+    VIRSH_UUID=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+    sudo virsh secret-undefine ${VIRSH_UUID} >/dev/null 2>&1
+}
+
+# configure_ceph() - Set config files, create data dirs, etc
+function configure_ceph {
+    local count=0
+
+    # create a backing file disk
+    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
+
+    # populate ceph directory
+    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}
+
+    # create ceph monitor initial key and directory
+    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
+    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
+
+    # create a default ceph configuration file
+    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
+[global]
+fsid = ${CEPH_FSID}
+mon_initial_members = $(hostname)
+mon_host = ${SERVICE_HOST}
+auth_cluster_required = cephx
+auth_service_required = cephx
+auth_client_required = cephx
+filestore_xattr_use_omap = true
+osd crush chooseleaf type = 0
+osd journal size = 100
+EOF
+
+    # bootstrap the ceph monitor
+    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
+    if is_ubuntu; then
+        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
+        sudo initctl emit ceph-mon id=$(hostname)
+    else
+        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
+        sudo service ceph start mon.$(hostname)
+    fi
+
+    # wait for the admin key to come up, otherwise we will not be able to perform the actions below
+    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
+        echo_summary "Waiting for the Ceph admin key to be ready..."
+
+        count=$(($count + 1))
+        if [ $count -eq 3 ]; then
+            die $LINENO "Maximum of 3 retries reached"
+        fi
+        sleep 5
+    done
+
+    # change pool replica size according to the CEPH_REPLICAS set by the user
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
+
+    # create a simple CRUSH rule that spreads replicas across OSDs instead
+    # of hosts, then apply this rule to the default pools
+    if [[ $CEPH_REPLICAS -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
+        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
+    fi
+
+    # create the OSD(s)
+    for rep in ${CEPH_REPLICAS_SEQ}; do
+        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
+        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
+        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
+        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd ' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
+
+        # ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking for a file
+        # named 'upstart' or 'sysvinit'; these touches let us control the OSD daemons
+        # from the init script.
+        if is_ubuntu; then
+            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
+        else
+            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
+        fi
+    done
+}
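+
+# An optional sanity check once the monitor and OSD(s) are running:
+#
+#   sudo ceph -c ${CEPH_CONF_FILE} -s     # should report all OSDs up/in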
+
+# configure_ceph_glance() - Glance config needs to come after Glance is set up
+function configure_ceph_glance {
+    # configure Glance service options, ceph pool, ceph user and ceph key
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
+    if [[ $CEPH_REPLICAS -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
+    fi
+    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
+    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
+    iniset $GLANCE_API_CONF DEFAULT default_store rbd
+    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
+    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
+    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
+    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
+    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
+}
+
+# configure_ceph_nova() - Nova config needs to come after Nova is set up
+function configure_ceph_nova {
+    # configure Nova service options, ceph pool, ceph user and ceph key
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
+    if [[ $CEPH_REPLICAS -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
+    fi
+    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
+    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
+    iniset $NOVA_CONF libvirt inject_key false
+    iniset $NOVA_CONF libvirt inject_partition -2
+    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
+    iniset $NOVA_CONF libvirt images_type rbd
+    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
+    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
+}
+
+# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
+function configure_ceph_cinder {
+    # Configure Cinder service options, ceph pool, ceph user and ceph key
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
+    if [[ $CEPH_REPLICAS -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
+    fi
+    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
+    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
+}
+
+# init_ceph() - Initialize databases, etc.
+function init_ceph {
+    # clean up from previous (possibly aborted) runs
+    # make sure to kill all ceph processes first
+    sudo pkill -f ceph-mon || true
+    sudo pkill -f ceph-osd || true
+}
+
+# install_ceph() - Collect source and prepare
+function install_ceph {
+    # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
+    #                leveraging the list in stack.sh
+    if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
+        NO_UPDATE_REPOS=False
+        install_package ceph
+    else
+        exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
+    fi
+}
+
+# start_ceph() - Start running processes, including screen
+function start_ceph {
+    if is_ubuntu; then
+        sudo initctl emit ceph-mon id=$(hostname)
+        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
+            sudo start ceph-osd id=${id}
+        done
+    else
+        sudo service ceph start
+    fi
+}
+
+# stop_ceph() - Stop running processes (non-screen)
+function stop_ceph {
+    if is_ubuntu; then
+        sudo service ceph-mon-all stop > /dev/null 2>&1
+        sudo service ceph-osd-all stop > /dev/null 2>&1
+    else
+        sudo service ceph stop > /dev/null 2>&1
+    fi
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder b/lib/cinder
index ce2a5c9..b30a036 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -46,6 +46,9 @@
 CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini
 
 # Public facing bits
+if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
+    CINDER_SERVICE_PROTOCOL="https"
+fi
 CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
 CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
 CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776}
@@ -96,10 +99,10 @@
 # Source the enabled backends
 if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
     for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-        BE_TYPE=${be%%:*}
-        BE_NAME=${be##*:}
-        if [[ -r $CINDER_BACKENDS/${BE_TYPE} ]]; then
-            source $CINDER_BACKENDS/${BE_TYPE}
+        be_type=${be%%:*}
+        be_name=${be##*:}
+        if [[ -r $CINDER_BACKENDS/${be_type} ]]; then
+            source $CINDER_BACKENDS/${be_type}
         fi
     done
 fi
@@ -120,7 +123,7 @@
 function cleanup_cinder {
     # ensure the volume group is cleared up because fails might
     # leave dead volumes in the group
-    TARGETS=$(sudo tgtadm --op show --mode target)
+    local targets=$(sudo tgtadm --op show --mode target)
     if [ $? -ne 0 ]; then
         # If tgt driver isn't running this won't work obviously
         # So check the response and restart if need be
@@ -130,11 +133,11 @@
         else
             restart_service tgtd
         fi
-        TARGETS=$(sudo tgtadm --op show --mode target)
+        targets=$(sudo tgtadm --op show --mode target)
     fi
 
-    if [[ -n "$TARGETS" ]]; then
-        iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
+    if [[ -n "$targets" ]]; then
+        local iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
         for i in "${iqn_list[@]}"; do
             echo removing iSCSI target: $i
             sudo tgt-admin --delete $i
@@ -148,11 +151,12 @@
     fi
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
+        local be be_name be_type
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            BE_TYPE=${be%%:*}
-            BE_NAME=${be##*:}
-            if type cleanup_cinder_backend_${BE_TYPE} >/dev/null 2>&1; then
-                cleanup_cinder_backend_${BE_TYPE} ${BE_NAME}
+            be_type=${be%%:*}
+            be_name=${be##*:}
+            if type cleanup_cinder_backend_${be_type} >/dev/null 2>&1; then
+                cleanup_cinder_backend_${be_type} ${be_name}
             fi
         done
     fi
@@ -161,7 +165,7 @@
 # configure_cinder_rootwrap() - configure Cinder's rootwrap
 function configure_cinder_rootwrap {
     # Set the paths of certain binaries
-    CINDER_ROOTWRAP=$(get_rootwrap_location cinder)
+    local cinder_rootwrap=$(get_rootwrap_location cinder)
 
     # Deploy new rootwrap filters files (owned by root).
     # Wipe any existing rootwrap.d files first
@@ -179,14 +183,14 @@
     sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf
     sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf
     # Specify rootwrap.conf as first parameter to rootwrap
-    ROOTWRAP_CSUDOER_CMD="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf *"
+    ROOTWRAP_CSUDOER_CMD="$cinder_rootwrap $CINDER_CONF_DIR/rootwrap.conf *"
 
     # Set up the rootwrap sudoers for cinder
-    TEMPFILE=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$TEMPFILE
-    chmod 0440 $TEMPFILE
-    sudo chown root:root $TEMPFILE
-    sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap
+    local tempfile=`mktemp`
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$tempfile
+    chmod 0440 $tempfile
+    sudo chown root:root $tempfile
+    sudo mv $tempfile /etc/sudoers.d/cinder-rootwrap
 }
 
 # configure_cinder() - Set config files, create data dirs, etc
@@ -211,12 +215,7 @@
     inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password
     inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir
 
-    iniset $CINDER_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-    iniset $CINDER_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
-    iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $CINDER_CONF keystone_authtoken admin_user cinder
-    iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $CINDER_CONF keystone_authtoken signing_dir $CINDER_AUTH_CACHE_DIR
+    configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR
 
     iniset $CINDER_CONF DEFAULT auth_strategy keystone
     iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
@@ -237,22 +236,23 @@
     iniset $CINDER_CONF DEFAULT enable_v1_api true
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
-        enabled_backends=""
-        default_type=""
+        local enabled_backends=""
+        local default_name=""
+        local be be_name be_type
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            BE_TYPE=${be%%:*}
-            BE_NAME=${be##*:}
-            if type configure_cinder_backend_${BE_TYPE} >/dev/null 2>&1; then
-                configure_cinder_backend_${BE_TYPE} ${BE_NAME}
+            be_type=${be%%:*}
+            be_name=${be##*:}
+            if type configure_cinder_backend_${be_type} >/dev/null 2>&1; then
+                configure_cinder_backend_${be_type} ${be_name}
             fi
-            if [[ -z "$default_type" ]]; then
-                default_type=$BE_TYPE}
+            if [[ -z "$default_name" ]]; then
+                default_name=$be_name
             fi
-            enabled_backends+=$BE_NAME,
+            enabled_backends+=$be_name,
         done
         iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*}
-        if [[ -n "$default_type" ]]; then
-            iniset $CINDER_CONF DEFAULT default_volume_type ${enabled_backends%,*}
+        if [[ -n "$default_name" ]]; then
+            iniset $CINDER_CONF DEFAULT default_volume_type ${default_name}
         fi
     fi
 
@@ -300,10 +300,21 @@
             -e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \
             /etc/lvm/lvm.conf
     fi
-    configure_API_version $CINDER_CONF $IDENTITY_API_VERSION
-    iniset $CINDER_CONF keystone_authtoken admin_user cinder
-    iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+
+    iniset $CINDER_CONF DEFAULT osapi_volume_workers "$API_WORKERS"
+
+    iniset $CINDER_CONF DEFAULT glance_api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}"
+    if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then
+        iniset $CINDER_CONF DEFAULT glance_protocol https
+    fi
+
+    # Register SSL certificates if provided
+    if is_ssl_enabled_service cinder; then
+        ensure_certificates CINDER
+
+        iniset $CINDER_CONF DEFAULT ssl_cert_file "$CINDER_SSL_CERT"
+        iniset $CINDER_CONF DEFAULT ssl_key_file "$CINDER_SSL_KEY"
+    fi
 
 }
 
@@ -316,28 +327,28 @@
 # Migrated from keystone_data.sh
 function create_cinder_accounts {
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Cinder
     if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
 
-        CINDER_USER=$(get_or_create_user "cinder" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT "cinder@example.com")
-        get_or_add_user_role $ADMIN_ROLE $CINDER_USER $SERVICE_TENANT
+        local cinder_user=$(get_or_create_user "cinder" \
+            "$SERVICE_PASSWORD" $service_tenant)
+        get_or_add_user_role $admin_role $cinder_user $service_tenant
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            CINDER_SERVICE=$(get_or_create_service "cinder" \
+            local cinder_service=$(get_or_create_service "cinder" \
                 "volume" "Cinder Volume Service")
-            get_or_create_endpoint $CINDER_SERVICE "$REGION_NAME" \
+            get_or_create_endpoint $cinder_service "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
 
-            CINDER_V2_SERVICE=$(get_or_create_service "cinderv2" \
+            local cinder_v2_service=$(get_or_create_service "cinderv2" \
                 "volumev2" "Cinder Volume Service V2")
-            get_or_create_endpoint $CINDER_V2_SERVICE "$REGION_NAME" \
+            get_or_create_endpoint $cinder_v2_service "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
@@ -354,6 +365,7 @@
 }
 
 # init_cinder() - Initialize database and volume group
+# Uses global ``NOVA_ENABLED_APIS``
 function init_cinder {
     # Force nova volumes off
     NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//")
@@ -367,11 +379,12 @@
     fi
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
+        local be be_name be_type
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            BE_TYPE=${be%%:*}
-            BE_NAME=${be##*:}
-            if type init_cinder_backend_${BE_TYPE} >/dev/null 2>&1; then
-                init_cinder_backend_${BE_TYPE} ${BE_NAME}
+            be_type=${be%%:*}
+            be_name=${be##*:}
+            if type init_cinder_backend_${be_type} >/dev/null 2>&1; then
+                init_cinder_backend_${be_type} ${be_name}
             fi
         done
     fi
@@ -403,6 +416,12 @@
 
 # start_cinder() - Start running processes, including screen
 function start_cinder {
+    local service_port=$CINDER_SERVICE_PORT
+    local service_protocol=$CINDER_SERVICE_PROTOCOL
+    if is_service_enabled tls-proxy; then
+        service_port=$CINDER_SERVICE_PORT_INT
+        service_protocol="http"
+    fi
     if is_service_enabled c-vol; then
         # Delete any old stack.conf
         sudo rm -f /etc/tgt/conf.d/stack.conf
@@ -427,15 +446,15 @@
         sudo tgtadm --mode system --op update --name debug --value on
     fi
 
-    screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
+    run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF"
     echo "Waiting for Cinder API to start..."
-    if ! wait_for_service $SERVICE_TIMEOUT $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT; then
+    if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then
         die $LINENO "c-api did not start"
     fi
 
-    screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
-    screen_it c-bak "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF"
-    screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF"
+    run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
+    run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF"
+    run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF"
 
     # NOTE(jdg): For cinder, startup order matters.  To ensure that report_capabilities is received
     # by the scheduler, start the cinder-volume service last (or restart it) after the scheduler
@@ -450,8 +469,9 @@
 # stop_cinder() - Stop running processes
 function stop_cinder {
     # Kill the cinder screen windows
+    local serv
     for serv in c-api c-bak c-sch c-vol; do
-        screen_stop $serv
+        stop_process $serv
     done
 
     if is_service_enabled c-vol; then
@@ -467,14 +487,13 @@
 function create_volume_types {
     # Create volume types
     if is_service_enabled c-api && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
+        local be be_name be_type
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            BE_TYPE=${be%%:*}
-            BE_NAME=${be##*:}
-            if type configure_cinder_backend_${BE_TYPE} >/dev/null 2>&1; then
-                # openstack volume type create --property volume_backend_name="${BE_TYPE}" ${BE_NAME}
-                cinder type-create ${BE_NAME} && \
-                    cinder type-key ${BE_NAME} set volume_backend_name="${BE_NAME}"
-            fi
+            be_type=${be%%:*}
+            be_name=${be##*:}
+            # openstack volume type create --property volume_backend_name="${be_type}" ${be_name}
+            cinder type-create ${be_name} && \
+                cinder type-key ${be_name} set volume_backend_name="${be_name}"
         done
     fi
 }
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
new file mode 100644
index 0000000..e9d2a02
--- /dev/null
+++ b/lib/cinder_backends/ceph
@@ -0,0 +1,79 @@
+# lib/cinder_backends/ceph
+# Configure the ceph backend
+
+# Enable with:
+#
+#   CINDER_ENABLED_BACKENDS+=,ceph:ceph
+#
+# Optional parameters:
+#   CINDER_BAK_CEPH_POOL=<pool-name>
+#   CINDER_BAK_CEPH_USER=<user>
+#   CINDER_BAK_CEPH_POOL_PG=<pg-num>
+#   CINDER_BAK_CEPH_POOL_PGP=<pgp-num>
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_backend_ceph - called from configure_cinder()
+
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
+CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
+CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_ceph - Set config files, create data dirs, etc
+# configure_cinder_backend_ceph $name
+function configure_cinder_backend_ceph {
+    local be_name=$1
+
+    iniset $CINDER_CONF $be_name volume_backend_name $be_name
+    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver"
+    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
+    iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
+    iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
+    iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID"
+    iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
+    iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
+    iniset $CINDER_CONF DEFAULT glance_api_version 2
+
+    if is_service_enabled c-bak; then
+        # Configure Cinder backup service options, ceph pool, ceph user and ceph key
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
+        if [[ $CEPH_REPLICAS -ne 1 ]]; then
+            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+        fi
+        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+        sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+
+        iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
+        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
+        iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
+        iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
+        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
+        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
+        iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backends/glusterfs b/lib/cinder_backends/glusterfs
new file mode 100644
index 0000000..dd772a8
--- /dev/null
+++ b/lib/cinder_backends/glusterfs
@@ -0,0 +1,46 @@
+# lib/cinder_backends/glusterfs
+# Configure the glusterfs backend
+
+# Enable with:
+#
+#   CINDER_ENABLED_BACKENDS+=,glusterfs:<volume-type-name>
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# CINDER_CONF
+# CINDER_CONF_DIR
+# CINDER_GLUSTERFS_SHARES - Contents of glusterfs shares config file
+
+# configure_cinder_backend_glusterfs - Configure Cinder for GlusterFS backends
+
+# Save trace setting
+GLUSTERFS_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_glusterfs - Set config files, create data dirs, etc
+function configure_cinder_backend_glusterfs {
+    local be_name=$1
+    iniset $CINDER_CONF $be_name volume_backend_name $be_name
+    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver"
+    iniset $CINDER_CONF $be_name glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs-shares-$be_name.conf"
+
+    if [[ -n "$CINDER_GLUSTERFS_SHARES" ]]; then
+        CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n")
+        echo "$CINDER_GLUSTERFS_SHARES" | tee "$CINDER_CONF_DIR/glusterfs-shares-$be_name.conf"
+    fi
+}
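+
+# A sketch with hypothetical share addresses: semicolons in
+# ``CINDER_GLUSTERFS_SHARES`` become newlines in the generated config, so
+#
+#   CINDER_GLUSTERFS_SHARES="192.168.1.10:/vol1;192.168.1.11:/vol2"
+#
+# writes two share lines to glusterfs-shares-<volume-type-name>.conf.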
+
+
+# Restore xtrace
+$GLUSTERFS_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index 324c323..8f8ab79 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -112,6 +112,7 @@
     local lv_prefix=$2
 
     # Clean out existing volumes
+    local lv
     for lv in $(sudo lvs --noheadings -o lv_name $vg 2>/dev/null); do
         # lv_prefix prefixes the LVs we want
         if [[ "${lv#$lv_prefix}" != "$lv" ]]; then
@@ -132,9 +133,9 @@
     # of the backing file
     if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
         # if the backing physical device is a loop device, it was probably setup by devstack
-        VG_DEV=$(sudo losetup -j $backing_file | awk -F':' '/backing-file/ { print $1}')
-        if [[ -n "$VG_DEV" ]] && [[ -e "$VG_DEV" ]]; then
-            sudo losetup -d $VG_DEV
+        local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/backing-file/ { print $1}')
+        if [[ -n "$vg_dev" ]] && [[ -e "$vg_dev" ]]; then
+            sudo losetup -d $vg_dev
             rm -f $backing_file
         fi
     fi
@@ -159,11 +160,11 @@
         if [ -z "$VOLUME_BACKING_DEVICE" ]; then
             # Only create if the file doesn't already exists
             [[ -f $backing_file ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $backing_file
-            DEV=`sudo losetup -f --show $backing_file`
+            local vg_dev=`sudo losetup -f --show $backing_file`
 
             # Only create if the loopback device doesn't contain $VOLUME_GROUP
             if ! sudo vgs $vg_name; then
-                sudo vgcreate $vg_name $DEV
+                sudo vgcreate $vg_name $vg_dev
             fi
         else
             sudo vgcreate $vg_name $VOLUME_BACKING_DEVICE
diff --git a/lib/cinder_backends/solidfire b/lib/cinder_backends/solidfire
new file mode 100644
index 0000000..95ffce1
--- /dev/null
+++ b/lib/cinder_backends/solidfire
@@ -0,0 +1,47 @@
+# lib/cinder_backends/solidfire
+# Configure the solidfire driver
+
+# Enable with:
+#
+#   CINDER_ENABLED_BACKENDS+=,solidfire:<volume-type-name>
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# CINDER_CONF
+
+# configure_cinder_driver - make configuration changes, including those to other services
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_solidfire - Set config files, create data dirs, etc
+function configure_cinder_backend_solidfire {
+    # To use SolidFire, set the following in local.conf:
+    # CINDER_ENABLED_BACKENDS+=,solidfire:<volume-type-name>
+    # SAN_IP=<mvip>
+    # SAN_LOGIN=<cluster-admin-account>
+    # SAN_PASSWORD=<cluster-admin-password>
+
+    local be_name=$1
+    iniset $CINDER_CONF $be_name volume_backend_name $be_name
+    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.solidfire.SolidFireDriver"
+    iniset $CINDER_CONF $be_name san_ip $SAN_IP
+    iniset $CINDER_CONF $be_name san_login $SAN_LOGIN
+    iniset $CINDER_CONF $be_name san_password $SAN_PASSWORD
+}
+
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backends/vmdk b/lib/cinder_backends/vmdk
new file mode 100644
index 0000000..b32c4b2
--- /dev/null
+++ b/lib/cinder_backends/vmdk
@@ -0,0 +1,45 @@
+# lib/cinder_backends/vmdk
+# Configure the VMware vmdk backend
+
+# Enable with:
+#
+#   CINDER_ENABLED_BACKENDS+=,vmdk:<volume-type-name>
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_backend_vmdk - Configure Cinder for VMware vmdk backends
+
+# Save trace setting
+VMDK_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_vmdk - Set config files, create data dirs, etc
+function configure_cinder_backend_vmdk {
+    # To use VMware vmdk backend, set the following in local.conf:
+    # CINDER_ENABLED_BACKENDS+=,vmdk:<volume-type-name>
+    # VMWAREAPI_IP=<vcenter-ip>
+    # VMWAREAPI_USER=<vcenter-admin-account>
+    # VMWAREAPI_PASSWORD=<vcenter-admin-password>
+
+    local be_name=$1
+    iniset $CINDER_CONF $be_name volume_backend_name $be_name
+    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver"
+    iniset $CINDER_CONF $be_name vmware_host_ip "$VMWAREAPI_IP"
+    iniset $CINDER_CONF $be_name vmware_host_username "$VMWAREAPI_USER"
+    iniset $CINDER_CONF $be_name vmware_host_password "$VMWAREAPI_PASSWORD"
+}
+
+
+# Restore xtrace
+$VMDK_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backends/xiv b/lib/cinder_backends/xiv
new file mode 100644
index 0000000..dbdb96c
--- /dev/null
+++ b/lib/cinder_backends/xiv
@@ -0,0 +1,84 @@
+# Copyright 2014 IBM Corp.
+# Copyright (c) 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+# Authors:
+#   Alon Marx <alonma@il.ibm.com>
+
+# lib/cinder_plugins/xiv
+# Configure the xiv_ds8k driver for xiv testing
+
+# Enable xiv_ds8k driver for xiv with:
+#
+#   CINDER_ENABLED_BACKENDS+=,xiv:<volume-type-name>
+#   XIV_DRIVER_VERSION=<version-string>
+#   SAN_IP=<storage-ip-or-hostname>
+#   SAN_LOGIN=<storage-admin-account>
+#   SAN_PASSWORD=<storage-admin-password>
+#   SAN_CLUSTERNAME=<cluster-name>
+#   CONNECTION_TYPE=<connection-type> iscsi|fc
+#   XIV_CHAP=<chap-type> disabled|enabled
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_backend_xiv - Configure Cinder for xiv backends
+
+# Save trace setting
+XIV_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_xiv - Set config files, create data dirs, etc
+function configure_cinder_backend_xiv {
+
+    local be_name=$1
+
+    if ! python -c 'from xiv_ds8k_openstack.xiv_nova_proxy import XIVNovaProxy'; then
+        die $LINENO "XIV_DS8K driver is missing. Please install it first"
+    fi
+
+    # For reference:
+    # XIV_DS8K_BACKEND='IBM-XIV_'${SAN_IP}'_'${SAN_CLUSTERNAME}'_'${CONNECTION_TYPE}
+    iniset $CINDER_CONF DEFAULT xiv_ds8k_driver_version $XIV_DRIVER_VERSION
+
+    iniset $CINDER_CONF $be_name san_ip $SAN_IP
+    iniset $CINDER_CONF $be_name san_login $SAN_LOGIN
+    iniset $CINDER_CONF $be_name san_password $SAN_PASSWORD
+    iniset $CINDER_CONF $be_name san_clustername $SAN_CLUSTERNAME
+    iniset $CINDER_CONF $be_name xiv_ds8k_connection_type $CONNECTION_TYPE
+    iniset $CINDER_CONF $be_name volume_backend_name $be_name
+    iniset $CINDER_CONF $be_name volume_driver 'cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver'
+    iniset $CINDER_CONF $be_name xiv_ds8k_proxy 'xiv_ds8k_openstack.xiv_nova_proxy.XIVNovaProxy'
+    iniset $CINDER_CONF $be_name xiv_chap $XIV_CHAP
+}
+
+# Restore xtrace
+$XIV_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
+
diff --git a/lib/cinder_plugins/solidfire b/lib/cinder_plugins/solidfire
deleted file mode 100644
index 2c970b5..0000000
--- a/lib/cinder_plugins/solidfire
+++ /dev/null
@@ -1,48 +0,0 @@
-# lib/cinder_plugins/solidfire
-# Configure the solidfire driver
-
-# Enable with:
-#
-#   CINDER_DRIVER=solidfire
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``cinder`` configurations
-
-# configure_cinder_driver - make configuration changes, including those to other services
-
-# Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-
-
-# Entry Points
-# ------------
-
-# configure_cinder_driver - Set config files, create data dirs, etc
-function configure_cinder_driver {
-    # To use solidfire, set the following in localrc:
-    # CINDER_DRIVER=solidfire
-    # SAN_IP=<mvip>
-    # SAN_LOGIN=<cluster-admin-account>
-    # SAN_PASSWORD=<cluster-admin-password>
-
-    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.solidfire.SolidFireDriver"
-    iniset $CINDER_CONF DEFAULT san_ip $SAN_IP
-    iniset $CINDER_CONF DEFAULT san_login $SAN_LOGIN
-    iniset $CINDER_CONF DEFAULT san_password $SAN_PASSWORD
-}
-
-# Restore xtrace
-$MY_XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/config b/lib/config
index 67d788c..0baa4cc 100644
--- a/lib/config
+++ b/lib/config
@@ -110,6 +110,7 @@
 
     [[ -r $localfile ]] || return 0
 
+    local configfile group
     for group in $matchgroups; do
         for configfile in $(get_meta_section_files $localfile $group); do
             if [[ -d $(dirname $(eval "echo $configfile")) ]]; then
diff --git a/lib/database b/lib/database
index 0661049..e226515 100644
--- a/lib/database
+++ b/lib/database
@@ -89,7 +89,7 @@
     # a multi-node DevStack installation.
 
     # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services
-    BASE_SQL_CONN=${BASE_SQL_CONN:-${DATABASE_TYPE}://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST}
+    BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST}
 
     return 0
 }
@@ -120,6 +120,14 @@
     database_connection_url_$DATABASE_TYPE $db
 }
 
+function get_database_type {
+    if [[ -n "${SQLALCHEMY_DATABASE_DRIVER}" ]]; then
+        echo "${DATABASE_TYPE}+${SQLALCHEMY_DATABASE_DRIVER}"
+    else
+        echo "${DATABASE_TYPE}"
+    fi
+}
+
 
 # Restore xtrace
 $XTRACE
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 0ccfce5..67bf85a 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -47,22 +47,22 @@
 }
 
 function configure_database_mysql {
-    local slow_log
+    local my_conf mysql slow_log
     echo_summary "Configuring and starting MySQL"
 
     if is_ubuntu; then
-        MY_CONF=/etc/mysql/my.cnf
-        MYSQL=mysql
+        my_conf=/etc/mysql/my.cnf
+        mysql=mysql
     elif is_fedora; then
         if [[ $DISTRO =~ (rhel7) ]]; then
-            MYSQL=mariadb
+            mysql=mariadb
         else
-            MYSQL=mysqld
+            mysql=mysqld
         fi
-        MY_CONF=/etc/my.cnf
+        my_conf=/etc/my.cnf
     elif is_suse; then
-        MY_CONF=/etc/my.cnf
-        MYSQL=mysql
+        my_conf=/etc/my.cnf
+        mysql=mysql
     else
         exit_distro_not_supported "mysql configuration"
     fi
@@ -70,7 +70,7 @@
     # Start mysql-server
     if is_fedora || is_suse; then
         # service is not started by default
-        start_service $MYSQL
+        start_service $mysql
     fi
 
     # Set the root password - only works the first time. For Ubuntu, we already
@@ -87,9 +87,9 @@
     # Change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) and
     # set default db type to InnoDB
     sudo bash -c "source $TOP_DIR/functions && \
-        iniset $MY_CONF mysqld bind-address 0.0.0.0 && \
-        iniset $MY_CONF mysqld sql_mode STRICT_ALL_TABLES && \
-        iniset $MY_CONF mysqld default-storage-engine InnoDB"
+        iniset $my_conf mysqld bind-address 0.0.0.0 && \
+        iniset $my_conf mysqld sql_mode STRICT_ALL_TABLES && \
+        iniset $my_conf mysqld default-storage-engine InnoDB"
 
 
     if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then
@@ -102,19 +102,19 @@
         sudo sed -e '/log.slow.queries/d' \
             -e '/long.query.time/d' \
             -e '/log.queries.not.using.indexes/d' \
-            -i $MY_CONF
+            -i $my_conf
 
         # Turn on slow query log, log all queries (any query taking longer than
         # 0 seconds) and log all non-indexed queries
         sudo bash -c "source $TOP_DIR/functions && \
-            iniset $MY_CONF mysqld slow-query-log 1 && \
-            iniset $MY_CONF mysqld slow-query-log-file $slow_log && \
-            iniset $MY_CONF mysqld long-query-time 0 && \
-            iniset $MY_CONF mysqld log-queries-not-using-indexes 1"
+            iniset $my_conf mysqld slow-query-log 1 && \
+            iniset $my_conf mysqld slow-query-log-file $slow_log && \
+            iniset $my_conf mysqld long-query-time 0 && \
+            iniset $my_conf mysqld log-queries-not-using-indexes 1"
 
     fi
 
-    restart_service $MYSQL
+    restart_service $mysql
 }
 
 function install_database_mysql {
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 6e85d6e..fb6d304 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -42,11 +42,12 @@
 }
 
 function configure_database_postgresql {
+    local pg_conf pg_dir pg_hba root_roles
     echo_summary "Configuring and starting PostgreSQL"
     if is_fedora; then
-        PG_HBA=/var/lib/pgsql/data/pg_hba.conf
-        PG_CONF=/var/lib/pgsql/data/postgresql.conf
-        if ! sudo [ -e $PG_HBA ]; then
+        pg_hba=/var/lib/pgsql/data/pg_hba.conf
+        pg_conf=/var/lib/pgsql/data/postgresql.conf
+        if ! sudo [ -e $pg_hba ]; then
             if ! [[ $DISTRO =~ (rhel6) ]]; then
                 sudo postgresql-setup initdb
             else
@@ -54,25 +55,25 @@
             fi
         fi
     elif is_ubuntu; then
-        PG_DIR=`find /etc/postgresql -name pg_hba.conf|xargs dirname`
-        PG_HBA=$PG_DIR/pg_hba.conf
-        PG_CONF=$PG_DIR/postgresql.conf
+        pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname`
+        pg_hba=$pg_dir/pg_hba.conf
+        pg_conf=$pg_dir/postgresql.conf
     elif is_suse; then
-        PG_HBA=/var/lib/pgsql/data/pg_hba.conf
-        PG_CONF=/var/lib/pgsql/data/postgresql.conf
+        pg_hba=/var/lib/pgsql/data/pg_hba.conf
+        pg_conf=/var/lib/pgsql/data/postgresql.conf
         # initdb is called when postgresql is first started
-        sudo [ -e $PG_HBA ] || start_service postgresql
+        sudo [ -e $pg_hba ] || start_service postgresql
     else
         exit_distro_not_supported "postgresql configuration"
     fi
     # Listen on all addresses
-    sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $PG_CONF
+    sudo sed -i "/listen_addresses/s/.*/listen_addresses = '*'/" $pg_conf
     # Set max_connections
-    sudo sed -i "/max_connections/s/.*/max_connections = $MAX_DB_CONNECTIONS/" $PG_CONF
+    sudo sed -i "/max_connections/s/.*/max_connections = $MAX_DB_CONNECTIONS/" $pg_conf
     # Do password auth from all IPv4 clients
-    sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $PG_HBA
+    sudo sed -i "/^host/s/all\s\+127.0.0.1\/32\s\+ident/$DATABASE_USER\t0.0.0.0\/0\tpassword/" $pg_hba
     # Do password auth for all IPv6 clients
-    sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $PG_HBA
+    sudo sed -i "/^host/s/all\s\+::1\/128\s\+ident/$DATABASE_USER\t::0\/0\tpassword/" $pg_hba
     restart_service postgresql
 
     # Create the role if it's not here or else alter it.
@@ -86,14 +87,14 @@
 
 function install_database_postgresql {
     echo_summary "Installing postgresql"
-    PGPASS=$HOME/.pgpass
-    if [[ ! -e $PGPASS ]]; then
-        cat <<EOF > $PGPASS
+    local pgpass=$HOME/.pgpass
+    if [[ ! -e $pgpass ]]; then
+        cat <<EOF > $pgpass
 *:*:*:$DATABASE_USER:$DATABASE_PASSWORD
 EOF
-        chmod 0600 $PGPASS
+        chmod 0600 $pgpass
     else
-        sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $PGPASS
+        sed -i "s/:root:\w\+/:root:$DATABASE_PASSWORD/" $pgpass
     fi
     if is_ubuntu; then
         install_package postgresql
diff --git a/lib/dib b/lib/dib
new file mode 100644
index 0000000..3a1167f
--- /dev/null
+++ b/lib/dib
@@ -0,0 +1,133 @@
+# lib/dib
+# Install and build images with **diskimage-builder**
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - install_dib
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+
+# set up default directories
+DIB_DIR=$DEST/diskimage-builder
+TIE_DIR=$DEST/tripleo-image-elements
+DIB_IMAGE_CACHE=$DATA_DIR/diskimage-builder/image-create
+DIB_PIP_REPO=$DATA_DIR/diskimage-builder/pip-repo
+DIB_PIP_REPO_PORT=${DIB_PIP_REPO_PORT:-8899}
+OCC_DIR=$DEST/os-collect-config
+ORC_DIR=$DEST/os-refresh-config
+OAC_DIR=$DEST/os-apply-config
+
+# Functions
+# ---------
+
+# install_dib() - Collect source and prepare
+function install_dib {
+    git_clone $DIB_REPO $DIB_DIR $DIB_BRANCH
+    pushd $DIB_DIR
+    pip_install ./
+    popd
+
+    git_clone $TIE_REPO $TIE_DIR $TIE_BRANCH
+    git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH
+    git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH
+    git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH
+    mkdir -p $DIB_IMAGE_CACHE
+}
+
+# build_dib_pip_repo() - Builds a local pip repo from local projects
+function build_dib_pip_repo {
+    local project_dirs=$1
+    local projpath proj package
+
+    rm -rf $DIB_PIP_REPO
+    mkdir -p $DIB_PIP_REPO
+
+    echo "<html><body>" > $DIB_PIP_REPO/index.html
+    for projpath in $project_dirs; do
+        proj=$(basename $projpath)
+        mkdir -p $DIB_PIP_REPO/$proj
+        pushd $projpath
+        rm -rf dist
+        python setup.py sdist
+        pushd dist
+        package=$(ls *)
+        mv $package $DIB_PIP_REPO/$proj/$package
+        popd
+
+        echo "<html><body><a href=\"$package\">$package</a></body></html>" > $DIB_PIP_REPO/$proj/index.html
+        echo "<a href=\"$proj\">$proj</a><br/>" >> $DIB_PIP_REPO/index.html
+
+        popd
+    done
+
+    echo "</body></html>" >> $DIB_PIP_REPO/index.html
+
+    local dib_pip_repo_apache_conf=$(apache_site_config_for dib_pip_repo)
+
+    sudo cp $FILES/apache-dib-pip-repo.template $dib_pip_repo_apache_conf
+    sudo sed -e "
+        s|%DIB_PIP_REPO%|$DIB_PIP_REPO|g;
+        s|%DIB_PIP_REPO_PORT%|$DIB_PIP_REPO_PORT|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+    " -i $dib_pip_repo_apache_conf
+    enable_apache_site dib_pip_repo
+}
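+# The repo layout served by Apache on DIB_PIP_REPO_PORT (sketch):
+#   $DIB_PIP_REPO/index.html         - links to each project
+#   $DIB_PIP_REPO/<proj>/index.html  - links to that project's sdist
+#   $DIB_PIP_REPO/<proj>/<package>   - the sdist tarball itself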
+
+# disk_image_create_upload() - Creates and uploads a diskimage-builder built image
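+# Usage sketch (hypothetical arguments):
+#   disk_image_create_upload "test-image" "vm fedora pypi" "$TIE_DIR/elements"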
+function disk_image_create_upload {
+
+    local image_name=$1
+    local image_elements=$2
+    local elements_path=$3
+
+    local image_path=$TOP_DIR/files/$image_name.qcow2
+
+    # Set the local pip repo as the primary index mirror so the
+    # image is built with local packages
+    local pypi_mirror_url=http://$SERVICE_HOST:$DIB_PIP_REPO_PORT/
+    local pypi_mirror_url_1
+
+    if [ -e $HOME/.pip/pip.conf ]; then
+        # Add the current pip.conf index-url as an extra-index-url
+        # in the image build
+        pypi_mirror_url_1=$(iniget $HOME/.pip/pip.conf global index-url)
+    else
+        # If no pip.conf, set upstream pypi as an extra mirror
+        # (this also sets the .pydistutils.cfg index-url)
+        pypi_mirror_url_1=http://pypi.python.org/simple
+    fi
+
+    # The disk-image-create command to run
+    ELEMENTS_PATH=$elements_path \
+    PYPI_MIRROR_URL=$pypi_mirror_url \
+    PYPI_MIRROR_URL_1=$pypi_mirror_url_1 \
+    disk-image-create -a amd64 $image_elements \
+        --image-cache $DIB_IMAGE_CACHE \
+        -o $image_path
+
+    local token=$(keystone token-get | grep ' id ' | get_field 2)
+    die_if_not_set $LINENO token "Keystone failed to get token"
+
+    glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT \
+        image-create --name $image_name --is-public True \
+        --container-format=bare --disk-format qcow2 \
+        < $image_path
+}
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/dstat b/lib/dstat
new file mode 100644
index 0000000..a2c522c
--- /dev/null
+++ b/lib/dstat
@@ -0,0 +1,41 @@
+# lib/dstat
+# Functions to start and stop dstat
+
+# Dependencies:
+#
+# - ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - start_dstat
+# - stop_dstat
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+# for DSTAT logging
+DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"}
+
+
+# start_dstat() - Start running processes, including screen
+function start_dstat {
+    # A better kind of sysstat, with the top process per time slice
+    DSTAT_OPTS="-tcmndrylp --top-cpu-adv"
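+    # Flag summary: -t time, -c cpu, -m memory, -n network, -d disk,
+    # -r io requests, -y system stats, -l load average, -p process counts;
+    # --top-cpu-adv reports the most CPU-expensive process per sample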
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
+    else
+        screen_it dstat "dstat $DSTAT_OPTS"
+    fi
+}
+
+# stop_dstat() - Stop dstat process
+function stop_dstat {
+    screen_stop dstat
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/gantt b/lib/gantt
index 8db2ca1..485613f 100644
--- a/lib/gantt
+++ b/lib/gantt
@@ -77,14 +77,14 @@
 # start_gantt() - Start running processes, including screen
 function start_gantt {
     if is_service_enabled gantt; then
-        screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF"
+        run_process gantt "$GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF"
     fi
 }
 
 # stop_gantt() - Stop running processes
 function stop_gantt {
     echo "Stop Gantt"
-    screen_stop gantt
+    stop_process gantt
 }
 
 # Restore xtrace
diff --git a/lib/glance b/lib/glance
index 475bb48..4194842 100644
--- a/lib/glance
+++ b/lib/glance
@@ -28,12 +28,14 @@
 
 # Set up default directories
 GLANCE_DIR=$DEST/glance
+GLANCE_STORE_DIR=$DEST/glance_store
 GLANCECLIENT_DIR=$DEST/python-glanceclient
 GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
 GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images}
 GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance}
 
 GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
+GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
 GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf
 GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
 GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
@@ -49,8 +51,18 @@
     GLANCE_BIN_DIR=$(get_python_exec_prefix)
 fi
 
+if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then
+    GLANCE_SERVICE_PROTOCOL="https"
+fi
+
 # Glance connection info.  Note the port must be specified.
-GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292}
+GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST}
+GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292}
+GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292}
+GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT}
+GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191}
+GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191}
 
 # Tell Tempest this project is present
 TEMPEST_SERVICES+=,glance
@@ -81,6 +93,11 @@
     fi
     sudo chown $STACK_USER $GLANCE_CONF_DIR
 
+    if [[ ! -d $GLANCE_METADEF_DIR ]]; then
+        sudo mkdir -p $GLANCE_METADEF_DIR
+    fi
+    sudo chown $STACK_USER $GLANCE_METADEF_DIR
+
     # Copy over our glance configurations and update them
     cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
     iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
@@ -89,38 +106,38 @@
     iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl
     iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
-    iniset $GLANCE_REGISTRY_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-    iniset $GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
-    configure_API_version $GLANCE_REGISTRY_CONF $IDENTITY_API_VERSION
-    iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance
-    iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry
+    configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
+    if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
+        iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
+    fi
+    iniset_rpc_backend glance $GLANCE_REGISTRY_CONF DEFAULT
 
     cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
     iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     inicomment $GLANCE_API_CONF DEFAULT log_file
     iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl
     iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
-    iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
     iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
     iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
-    iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-    iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
-    configure_API_version $GLANCE_API_CONF $IDENTITY_API_VERSION
-    iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $GLANCE_API_CONF keystone_authtoken admin_user glance
-    iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+    configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api
     if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
         iniset $GLANCE_API_CONF DEFAULT notification_driver messaging
     fi
     iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT
-    iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
         iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
         iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso"
     fi
 
+    # Store specific configs
+    iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
+
+    # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
+    # sections.
+    iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
+
+    iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
+
     # Store the images in swift if enabled.
     if is_service_enabled s-proxy; then
         iniset $GLANCE_API_CONF DEFAULT default_store swift
@@ -130,6 +147,35 @@
         iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
 
         iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store"
+
+        # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
+        # sections.
+        iniset $GLANCE_API_CONF glance_store default_store swift
+        iniset $GLANCE_API_CONF glance_store swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/
+        iniset $GLANCE_API_CONF glance_store swift_store_user $SERVICE_TENANT_NAME:glance-swift
+        iniset $GLANCE_API_CONF glance_store swift_store_key $SERVICE_PASSWORD
+        iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
+        iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
+    fi
+
+    if is_service_enabled tls-proxy; then
+        iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
+        iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT
+    fi
+
+    # Register SSL certificates if provided
+    if is_ssl_enabled_service glance; then
+        ensure_certificates GLANCE
+
+        iniset $GLANCE_API_CONF DEFAULT cert_file "$GLANCE_SSL_CERT"
+        iniset $GLANCE_API_CONF DEFAULT key_file "$GLANCE_SSL_KEY"
+
+        iniset $GLANCE_REGISTRY_CONF DEFAULT cert_file "$GLANCE_SSL_CERT"
+        iniset $GLANCE_REGISTRY_CONF DEFAULT key_file "$GLANCE_SSL_KEY"
+    fi
+
+    if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then
+        iniset $GLANCE_API_CONF DEFAULT registry_client_protocol https
     fi
 
     cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
@@ -140,7 +186,6 @@
     iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     inicomment $GLANCE_CACHE_CONF DEFAULT log_file
     iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
-    iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
     iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
     iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url
     iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI/v2.0
@@ -151,8 +196,24 @@
     iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password
     iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
 
+    # Store specific confs
+    # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
+    # sections.
+    iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
+    iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
+
     cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
     cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
+
+    cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR
+
+    if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
+        CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
+        CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
+
+        iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
+        iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
+    fi
 }
 
 # create_glance_accounts() - Set up common required glance accounts
@@ -165,27 +226,27 @@
 function create_glance_accounts {
     if is_service_enabled g-api; then
 
-        GLANCE_USER=$(get_or_create_user "glance" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT_NAME "glance@example.com")
-        get_or_add_user_role service $GLANCE_USER $SERVICE_TENANT_NAME
+        local glance_user=$(get_or_create_user "glance" \
+            "$SERVICE_PASSWORD" $SERVICE_TENANT_NAME)
+        get_or_add_user_role service $glance_user $SERVICE_TENANT_NAME
 
         # required for swift access
         if is_service_enabled s-proxy; then
 
-            GLANCE_SWIFT_USER=$(get_or_create_user "glance-swift" \
+            local glance_swift_user=$(get_or_create_user "glance-swift" \
                 "$SERVICE_PASSWORD" $SERVICE_TENANT_NAME "glance-swift@example.com")
-            get_or_add_user_role "ResellerAdmin" $GLANCE_SWIFT_USER $SERVICE_TENANT_NAME
+            get_or_add_user_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME
         fi
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            GLANCE_SERVICE=$(get_or_create_service "glance" \
+            local glance_service=$(get_or_create_service "glance" \
                 "image" "Glance Image Service")
-            get_or_create_endpoint $GLANCE_SERVICE \
+            get_or_create_endpoint $glance_service \
                 "$REGION_NAME" \
-                "http://$GLANCE_HOSTPORT" \
-                "http://$GLANCE_HOSTPORT" \
-                "http://$GLANCE_HOSTPORT"
+                "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
+                "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
+                "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
         fi
     fi
 }
@@ -217,6 +278,9 @@
     # Migrate glance database
     $GLANCE_BIN_DIR/glance-manage db_sync
 
+    # Load metadata definitions
+    $GLANCE_BIN_DIR/glance-manage db_load_metadefs
+
     create_glance_cache_dir
 }
 
@@ -228,16 +292,28 @@
 
 # install_glance() - Collect source and prepare
 function install_glance {
+    # Install glance_store from git so we make sure we're testing
+    # the latest code.
+    git_clone $GLANCE_STORE_REPO $GLANCE_STORE_DIR $GLANCE_STORE_BRANCH
+    setup_develop $GLANCE_STORE_DIR
+
     git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
     setup_develop $GLANCE_DIR
 }
 
 # start_glance() - Start running processes, including screen
 function start_glance {
-    screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
-    screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
+    local service_protocol=$GLANCE_SERVICE_PROTOCOL
+    if is_service_enabled tls-proxy; then
+        start_tls_proxy '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT &
+        start_tls_proxy '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT &
+    fi
+
+    run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
+    run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
+
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
+    if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then
         die $LINENO "g-api did not start"
     fi
 }
@@ -245,8 +321,8 @@
 # stop_glance() - Stop running processes
 function stop_glance {
     # Kill the Glance screen windows
-    screen_stop g-api
-    screen_stop g-reg
+    stop_process g-api
+    stop_process g-reg
 }
 
 
diff --git a/lib/heat b/lib/heat
index afed52b..737598d 100644
--- a/lib/heat
+++ b/lib/heat
@@ -31,6 +31,8 @@
 # set up default directories
 HEAT_DIR=$DEST/heat
 HEATCLIENT_DIR=$DEST/python-heatclient
+HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
+HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
 HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat}
 HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE`
 HEAT_CONF_DIR=/etc/heat
@@ -38,6 +40,8 @@
 HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d
 HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates
 HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN`
+HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP}
+HEAT_API_PORT=${HEAT_API_PORT:-8004}
 
 # other default options
 HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts}
@@ -67,6 +71,9 @@
 # configure_heat() - Set config files, create data dirs, etc
 function configure_heat {
     setup_develop $HEAT_DIR
+    if [[ "$HEAT_STANDALONE" = "True" ]]; then
+        setup_develop $HEAT_DIR/contrib/heat_keystoneclient_v2
+    fi
 
     if [[ ! -d $HEAT_CONF_DIR ]]; then
         sudo mkdir -p $HEAT_CONF_DIR
@@ -81,8 +88,6 @@
     HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001}
     HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$HOST_IP}
     HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003}
-    HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP}
-    HEAT_API_PORT=${HEAT_API_PORT:-8004}
     HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini
     HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json
 
@@ -96,7 +101,7 @@
     iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition
     iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT
     iniset $HEAT_CONF database connection `database_connection_url heat`
-    iniset $HEAT_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/urandom`
+    iniset $HEAT_CONF DEFAULT auth_encryption_key $(generate_hex_string 16)
 
     iniset $HEAT_CONF DEFAULT region_name_for_services "$REGION_NAME"
 
@@ -108,24 +113,21 @@
         setup_colorized_logging $HEAT_CONF DEFAULT tenant user
     fi
 
-    # keystone authtoken
-    iniset $HEAT_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-    configure_API_version $HEAT_CONF $IDENTITY_API_VERSION
-    iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
-    iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $HEAT_CONF keystone_authtoken admin_user heat
-    iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR
+    configure_auth_token_middleware $HEAT_CONF heat $HEAT_AUTH_CACHE_DIR
 
     if is_ssl_enabled_service "key"; then
-        iniset $HEAT_CONF clients_keystone ca_file $KEYSTONE_SSL_CA
+        iniset $HEAT_CONF clients_keystone ca_file $SSL_BUNDLE_FILE
     fi
 
     # ec2authtoken
     iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0
 
     # paste_deploy
-    [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone
+    if [[ "$HEAT_STANDALONE" = "True" ]]; then
+        iniset $HEAT_CONF paste_deploy flavor standalone
+        iniset $HEAT_CONF DEFAULT keystone_backend heat_keystoneclient_v2.client.KeystoneClientV2
+        iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s"
+    fi
 
     # OpenStack API
     iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT
@@ -136,6 +138,18 @@
     # Cloudwatch API
     iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT
 
+    if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
+        iniset $HEAT_CONF clients_keystone ca_file $SSL_BUNDLE_FILE
+    fi
+
+    if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
+        iniset $HEAT_CONF clients_nova ca_file $SSL_BUNDLE_FILE
+    fi
+
+    if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
+        iniset $HEAT_CONF clients_cinder ca_file $SSL_BUNDLE_FILE
+    fi
+
     # heat environment
     sudo mkdir -p $HEAT_ENV_DIR
     sudo chown $STACK_USER $HEAT_ENV_DIR
@@ -179,60 +193,52 @@
     git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
 }
 
+# install_heat_other() - Collect source and prepare
+function install_heat_other {
+    git_clone $HEAT_CFNTOOLS_REPO $HEAT_CFNTOOLS_DIR $HEAT_CFNTOOLS_BRANCH
+    git_clone $HEAT_TEMPLATES_REPO $HEAT_TEMPLATES_REPO_DIR $HEAT_TEMPLATES_BRANCH
+}
+
 # start_heat() - Start running processes, including screen
 function start_heat {
-    screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF"
-    screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF"
-    screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF"
-    screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-file=$HEAT_CONF"
+    run_process h-eng "$HEAT_DIR/bin/heat-engine --config-file=$HEAT_CONF"
+    run_process h-api "$HEAT_DIR/bin/heat-api --config-file=$HEAT_CONF"
+    run_process h-api-cfn "$HEAT_DIR/bin/heat-api-cfn --config-file=$HEAT_CONF"
+    run_process h-api-cw "$HEAT_DIR/bin/heat-api-cloudwatch --config-file=$HEAT_CONF"
 }
 
 # stop_heat() - Stop running processes
 function stop_heat {
     # Kill the screen windows
+    local serv
     for serv in h-eng h-api h-api-cfn h-api-cw; do
-        screen_stop $serv
+        stop_process $serv
     done
 }
 
-function disk_image_create {
-    local elements_path=$1
-    local elements=$2
-    local arch=$3
-    local output=$TOP_DIR/files/$4
-    if [[ -f "$output.qcow2" ]]; then
-        echo "Image file already exists: $output_file"
-    else
-        ELEMENTS_PATH=$elements_path disk-image-create \
-            $elements -a $arch -o $output
-    fi
-    # upload with fake URL so that image in $TOP_DIR/files is used
-    upload_image "http://localhost/$output.qcow2" $TOKEN
-}
-
 # create_heat_accounts() - Set up common required heat accounts
 function create_heat_accounts {
     # migrated from files/keystone_data.sh
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    HEAT_USER=$(get_or_create_user "heat" \
-        "$SERVICE_PASSWORD" $SERVICE_TENANT "heat@example.com")
-    get_or_add_user_role $ADMIN_ROLE $HEAT_USER $SERVICE_TENANT
+    local heat_user=$(get_or_create_user "heat" \
+        "$SERVICE_PASSWORD" $service_tenant)
+    get_or_add_user_role $admin_role $heat_user $service_tenant
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        HEAT_SERVICE=$(get_or_create_service "heat" \
+        local heat_service=$(get_or_create_service "heat" \
                 "orchestration" "Heat Orchestration Service")
-        get_or_create_endpoint $HEAT_SERVICE \
+        get_or_create_endpoint $heat_service \
             "$REGION_NAME" \
             "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
             "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
             "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
 
-        HEAT_CFN_SERVICE=$(get_or_create_service "heat-cfn" \
+        local heat_cfn_service=$(get_or_create_service "heat-cfn" \
                 "cloudformation" "Heat CloudFormation Service")
-        get_or_create_endpoint $HEAT_CFN_SERVICE \
+        get_or_create_endpoint $heat_cfn_service \
             "$REGION_NAME" \
             "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
             "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
@@ -247,36 +253,36 @@
         # heat_stack_owner role is given to users who create Heat stacks,
         # it's the default role used by heat to delegate to the heat service
         # user (for performing deferred operations via trusts), see heat.conf
-        HEAT_OWNER_ROLE=$(get_or_create_role "heat_stack_owner")
+        local heat_owner_role=$(get_or_create_role "heat_stack_owner")
 
         # Give the role to the demo and admin users so they can create stacks
         # in either of the projects created by devstack
-        get_or_add_user_role $HEAT_OWNER_ROLE demo demo
-        get_or_add_user_role $HEAT_OWNER_ROLE admin demo
-        get_or_add_user_role $HEAT_OWNER_ROLE admin admin
+        get_or_add_user_role $heat_owner_role demo demo
+        get_or_add_user_role $heat_owner_role admin demo
+        get_or_add_user_role $heat_owner_role admin admin
         iniset $HEAT_CONF DEFAULT deferred_auth_method trusts
     fi
 
     if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then
         # Note we have to pass token/endpoint here because the current endpoint and
         # version negotiation in OSC means just --os-identity-api-version=3 won't work
-        KS_ENDPOINT_V3="$KEYSTONE_SERVICE_URI/v3"
+        local ks_endpoint_v3="$KEYSTONE_SERVICE_URI/v3"
 
-        D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+        D_ID=$(openstack --os-token $OS_TOKEN --os-url=$ks_endpoint_v3 \
             --os-identity-api-version=3 domain list | grep ' heat ' | get_field 1)
 
         if [[ -z "$D_ID" ]]; then
-            D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+            D_ID=$(openstack --os-token $OS_TOKEN --os-url=$ks_endpoint_v3 \
                 --os-identity-api-version=3 domain create heat \
                 --description "Owns users and projects created by heat" \
                 | grep ' id ' | get_field 2)
             iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
 
-            openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+            openstack --os-token $OS_TOKEN --os-url=$ks_endpoint_v3 \
                 --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
                 --domain $D_ID heat_domain_admin \
                 --description "Manages users and projects created by heat"
-            openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+            openstack --os-token $OS_TOKEN --os-url=$ks_endpoint_v3 \
                 --os-identity-api-version=3 role add \
                 --user heat_domain_admin --domain ${D_ID} admin
             iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
@@ -285,6 +291,23 @@
     fi
 }
 
+# build_heat_functional_test_image() - Build and upload functional test image
+function build_heat_functional_test_image {
+    build_dib_pip_repo "$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR"
+    local image_name=heat-functional-tests-image
+
+    # The elements to invoke disk-image-create with
+    local image_elements="vm fedora selinux-permissive pypi \
+        os-collect-config os-refresh-config os-apply-config heat-cfntools \
+        heat-config heat-config-cfn-init heat-config-puppet heat-config-script"
+
+    # Elements path for tripleo-image-elements and heat-templates software-config
+    local elements_path=$TIE_DIR/elements:$HEAT_TEMPLATES_REPO_DIR/hot/software-config/elements
+
+    disk_image_create_upload "$image_name" "$image_elements" "$elements_path"
+    iniset $TEMPEST_CONFIG orchestration image_ref $image_name
+}
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/horizon b/lib/horizon
index a65b243..755be18 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -50,7 +50,7 @@
         sed -e "/^$option/d" -i $local_settings
         echo -e "\n$option=$value" >> $file
     elif grep -q "^$section" $file; then
-        line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
+        local line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
         if [ -n "$line" ]; then
             sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file
         else
@@ -89,29 +89,14 @@
 # init_horizon() - Initialize databases, etc.
 function init_horizon {
     # ``local_settings.py`` is used to override horizon default settings.
-    local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
     cp $HORIZON_SETTINGS $local_settings
 
-    if is_service_enabled neutron; then
-        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_security_group $Q_USE_SECGROUP
-    fi
-    # enable loadbalancer dashboard in case service is enabled
-    if is_service_enabled q-lbaas; then
-        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_lb True
-    fi
-
-    # enable firewall dashboard in case service is enabled
-    if is_service_enabled q-fwaas; then
-        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_firewall True
-    fi
-
-    # enable VPN dashboard in case service is enabled
-    if is_service_enabled q-vpn; then
-        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True
-    fi
-
     _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
     _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\""
+    if [[ -n "$KEYSTONE_TOKEN_HASH_ALGORITHM" ]]; then
+        _horizon_config_set $local_settings "" OPENSTACK_TOKEN_HASH_ALGORITHM \""$KEYSTONE_TOKEN_HASH_ALGORITHM"\"
+    fi
 
     if [ -f $SSL_BUNDLE_FILE ]; then
         _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\"
@@ -120,12 +105,6 @@
     # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
-    # Apache 2.4 uses mod_authz_host for access control now (instead of "Allow")
-    HORIZON_REQUIRE=''
-    if check_apache_version "2.4" ; then
-        HORIZON_REQUIRE='Require all granted'
-    fi
-
     local horizon_conf=$(apache_site_config_for horizon)
 
     # Configure apache to run horizon
@@ -135,7 +114,6 @@
         s,%HORIZON_DIR%,$HORIZON_DIR,g;
         s,%APACHE_NAME%,$APACHE_NAME,g;
         s,%DEST%,$DEST,g;
-        s,%HORIZON_REQUIRE%,$HORIZON_REQUIRE,g;
     \" $FILES/apache-horizon.template >$horizon_conf"
 
     if is_ubuntu; then
@@ -152,6 +130,7 @@
 
     # Remove old log files that could mess with how devstack detects whether Horizon
     # has been successfully started (see start_horizon() and functions::screen_it())
+    # and functions::run_process()
     sudo rm -f /var/log/$APACHE_NAME/horizon_*
 
 }
@@ -173,7 +152,7 @@
 # start_horizon() - Start running processes, including screen
 function start_horizon {
     restart_apache_server
-    screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
+    tail_log horizon /var/log/$APACHE_NAME/horizon_error.log
 }
 
 # stop_horizon() - Stop running processes (non-screen)
diff --git a/lib/infra b/lib/infra
index e2f7dad..e18c66e 100644
--- a/lib/infra
+++ b/lib/infra
@@ -10,7 +10,6 @@
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# - unfubar_setuptools
 # - install_infra
 
 # Save trace setting
@@ -26,19 +25,6 @@
 # Entry Points
 # ------------
 
-# unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools
-function unfubar_setuptools {
-    # this is a giant game of who's on first, but it does consistently work
-    # there is hope that upstream python packaging fixes this in the future
-    echo_summary "Unbreaking setuptools"
-    pip_install -U setuptools
-    pip_install -U pip
-    uninstall_package python-setuptools
-    pip_install -U setuptools
-    pip_install -U pip
-}
-
-
 # install_infra() - Collect source and prepare
 function install_infra {
     # bring down global requirements
diff --git a/lib/ironic b/lib/ironic
index ef136bc..5f3ebcd 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -29,6 +29,7 @@
 
 # Set up default directories
 IRONIC_DIR=$DEST/ironic
+IRONIC_PYTHON_AGENT_DIR=$DEST/ironic-python-agent
 IRONIC_DATA_DIR=$DATA_DIR/ironic
 IRONIC_STATE_PATH=/var/lib/ironic
 IRONICCLIENT_DIR=$DEST/python-ironicclient
@@ -66,8 +67,6 @@
 IRONIC_VM_LOG_CONSOLE=${IRONIC_VM_LOG_CONSOLE:-True}
 IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/}
 
-DIB_DIR=${DIB_DIR:-$DEST/diskimage-builder}
-
 # Use DIB to create deploy ramdisk and kernel.
 IRONIC_BUILD_DEPLOY_RAMDISK=`trueorfalse True $IRONIC_BUILD_DEPLOY_RAMDISK`
 # If not use DIB, these files are used as deploy ramdisk/kernel.
@@ -76,6 +75,13 @@
 IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-}
 IRONIC_DEPLOY_ELEMENT=${IRONIC_DEPLOY_ELEMENT:-deploy-ironic}
 
+IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe.vmlinuz}
+IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz}
+
+# Which deploy driver to use - valid choices right now
+# are 'pxe_ssh' and 'agent_ssh'.
+IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-pxe_ssh}
+
 #TODO(agordeev): replace 'ubuntu' with host distro name getting
 IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT}
 
@@ -89,6 +95,32 @@
 # Tell Tempest this project is present
 TEMPEST_SERVICES+=,ironic
 
+# Enable iPXE
+IRONIC_IPXE_ENABLED=$(trueorfalse False $IRONIC_IPXE_ENABLED)
+IRONIC_HTTP_DIR=${IRONIC_HTTP_DIR:-$IRONIC_DATA_DIR/httpboot}
+IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$HOST_IP}
+IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-8088}
+
+# get_pxe_boot_file() - Get the PXE/iPXE boot file path
+function get_pxe_boot_file {
+    local relpath=syslinux/pxelinux.0
+    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+        relpath=ipxe/undionly.kpxe
+    fi
+
+    local pxe_boot_file
+    if is_ubuntu; then
+        pxe_boot_file=/usr/lib/$relpath
+    elif is_fedora || is_suse; then
+        pxe_boot_file=/usr/share/$relpath
+    fi
+
+    echo $pxe_boot_file
+}
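+# For example, on Ubuntu this returns /usr/lib/syslinux/pxelinux.0 by
+# default, or /usr/lib/ipxe/undionly.kpxe when IRONIC_IPXE_ENABLED is True.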
+
+# PXE boot image
+IRONIC_PXE_BOOT_IMAGE=${IRONIC_PXE_BOOT_IMAGE:-$(get_pxe_boot_file)}
+
 
 # Functions
 # ---------
@@ -102,14 +134,44 @@
 
 # install_ironic() - Collect source and prepare
 function install_ironic {
+    # make sure all needed services are enabled
+    for srv in nova glance key; do
+        if ! is_service_enabled "$srv"; then
+            die $LINENO "$srv should be enabled for Ironic."
+        fi
+    done
     git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH
     setup_develop $IRONIC_DIR
+
+    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+        install_apache_wsgi
+    fi
 }
 
 # install_ironicclient() - Collect sources and prepare
 function install_ironicclient {
     git_clone $IRONICCLIENT_REPO $IRONICCLIENT_DIR $IRONICCLIENT_BRANCH
     setup_develop $IRONICCLIENT_DIR
+    sudo install -D -m 0644 -o $STACK_USER {$IRONICCLIENT_DIR/tools/,/etc/bash_completion.d/}ironic.bash_completion
+}
+
+# _cleanup_ironic_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+function _cleanup_ironic_apache_wsgi {
+    sudo rm -rf $IRONIC_HTTP_DIR
+    disable_apache_site ironic
+    sudo rm -f $(apache_site_config_for ironic)
+    restart_apache_server
+}
+
+# _config_ironic_apache_wsgi() - Set WSGI config files of Ironic
+function _config_ironic_apache_wsgi {
+    local ironic_apache_conf=$(apache_site_config_for ironic)
+    sudo cp $FILES/apache-ironic.template $ironic_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$IRONIC_HTTP_PORT|g;
+        s|%HTTPROOT%|$IRONIC_HTTP_DIR|g;
+    " -i $ironic_apache_conf
+    enable_apache_site ironic
 }
 
 # cleanup_ironic() - Remove residual data files, anything left over from previous
@@ -118,11 +180,35 @@
     sudo rm -rf $IRONIC_AUTH_CACHE_DIR
 }
 
-# configure_ironic() - Set config files, create data dirs, etc
-function configure_ironic {
+# configure_ironic_dirs() - Create all directories required by Ironic and
+# associated services.
+function configure_ironic_dirs {
     if [[ ! -d $IRONIC_CONF_DIR ]]; then
         sudo mkdir -p $IRONIC_CONF_DIR
     fi
+
+    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+        sudo mkdir -p $IRONIC_HTTP_DIR
+        sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_HTTP_DIR
+    fi
+
+    sudo mkdir -p $IRONIC_DATA_DIR
+    sudo mkdir -p $IRONIC_STATE_PATH
+    sudo mkdir -p $IRONIC_TFTPBOOT_DIR
+    sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH
+    sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR
+    mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
+
+    if [ ! -f $IRONIC_PXE_BOOT_IMAGE ]; then
+        die $LINENO "PXE boot file $IRONIC_PXE_BOOT_IMAGE not found."
+    fi
+
+    cp $IRONIC_PXE_BOOT_IMAGE $IRONIC_TFTPBOOT_DIR
+}
+
+# configure_ironic() - Set config files, create data dirs, etc
+function configure_ironic {
+    configure_ironic_dirs
     sudo chown $STACK_USER $IRONIC_CONF_DIR
 
     # Copy over ironic configuration file and configure common parameters.
@@ -147,8 +233,8 @@
         setup_colorized_logging $IRONIC_CONF_FILE DEFAULT
     fi
 
-    if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" ]]; then
-        configure_ironic_auxiliary
+    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]]; then
+        _config_ironic_apache_wsgi
     fi
 }
 
@@ -157,14 +243,8 @@
 function configure_ironic_api {
     iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone
     iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON
-    iniset $IRONIC_CONF_FILE keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-    iniset $IRONIC_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
-    iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI
-    iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic
-    iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+    configure_auth_token_middleware $IRONIC_CONF_FILE ironic $IRONIC_AUTH_CACHE_DIR/api
     iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT
-    iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api
 
     cp -p $IRONIC_DIR/etc/ironic/policy.json $IRONIC_POLICY_JSON
 }
@@ -174,15 +254,15 @@
 function configure_ironic_conductor {
     cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
     cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
-    IRONIC_ROOTWRAP=$(get_rootwrap_location ironic)
-    ROOTWRAP_ISUDOER_CMD="$IRONIC_ROOTWRAP $IRONIC_CONF_DIR/rootwrap.conf *"
+    local ironic_rootwrap=$(get_rootwrap_location ironic)
+    local rootwrap_isudoer_cmd="$ironic_rootwrap $IRONIC_CONF_DIR/rootwrap.conf *"
 
     # Set up the rootwrap sudoers for ironic
-    TEMPFILE=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_ISUDOER_CMD" >$TEMPFILE
-    chmod 0440 $TEMPFILE
-    sudo chown root:root $TEMPFILE
-    sudo mv $TEMPFILE /etc/sudoers.d/ironic-rootwrap
+    local tempfile=`mktemp`
+    echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_isudoer_cmd" >$tempfile
+    chmod 0440 $tempfile
+    sudo chown root:root $tempfile
+    sudo mv $tempfile /etc/sudoers.d/ironic-rootwrap
 
     iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
     iniset $IRONIC_CONF_FILE DEFAULT enabled_drivers $IRONIC_ENABLED_DRIVERS
@@ -193,6 +273,31 @@
     if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
         iniset $IRONIC_CONF_FILE pxe pxe_append_params "nofb nomodeset vga=normal console=ttyS0"
     fi
+    if [[ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]] ; then
+        if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then
+            iniset $IRONIC_CONF_FILE glance swift_temp_url_key $SWIFT_TEMPURL_KEY
+        else
+            die $LINENO "SWIFT_ENABLE_TEMPURLS must be True to use the agent_ssh driver in Ironic."
+        fi
+        iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:8080
+        iniset $IRONIC_CONF_FILE glance swift_api_version v1
+        iniset $IRONIC_CONF_FILE glance swift_account AUTH_${SERVICE_TENANT}
+        iniset $IRONIC_CONF_FILE glance swift_container glance
+        iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
+        iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30
+        if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
+            iniset $IRONIC_CONF_FILE agent agent_pxe_append_params "nofb nomodeset vga=normal console=ttyS0"
+        fi
+    fi
+
+    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+        local pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
+        iniset $IRONIC_CONF_FILE pxe ipxe_enabled True
+        iniset $IRONIC_CONF_FILE pxe pxe_config_template '\$pybasedir/drivers/modules/ipxe_config.template'
+        iniset $IRONIC_CONF_FILE pxe pxe_bootfile_name $pxebin
+        iniset $IRONIC_CONF_FILE pxe http_root $IRONIC_HTTP_DIR
+        iniset $IRONIC_CONF_FILE pxe http_url "http://$IRONIC_HTTP_SERVER:$IRONIC_HTTP_PORT"
+    fi
 }
 
 # create_ironic_cache_dir() - Part of the init_ironic() process
@@ -213,22 +318,22 @@
 # service              ironic     admin        # if enabled
 function create_ironic_accounts {
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Ironic
     if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then
         # Get ironic user if exists
 
-        IRONIC_USER=$(get_or_create_user "ironic" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT "ironic@example.com")
-        get_or_add_user_role $ADMIN_ROLE $IRONIC_USER $SERVICE_TENANT
+        local ironic_user=$(get_or_create_user "ironic" \
+            "$SERVICE_PASSWORD" $service_tenant)
+        get_or_add_user_role $admin_role $ironic_user $service_tenant
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            IRONIC_SERVICE=$(get_or_create_service "ironic" \
+            local ironic_service=$(get_or_create_service "ironic" \
                 "baremetal" "Ironic baremetal provisioning service")
-            get_or_create_endpoint $IRONIC_SERVICE \
+            get_or_create_endpoint $ironic_service \
                 "$REGION_NAME" \
                 "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
                 "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
@@ -260,12 +365,17 @@
     if is_service_enabled ir-cond; then
         start_ironic_conductor
     fi
+
+    # Start Apache if iPXE is enabled
+    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+        restart_apache_server
+    fi
 }
 
 # start_ironic_api() - Used by start_ironic().
 # Starts Ironic API server.
 function start_ironic_api {
-    screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
+    run_process ir-api "$IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
     echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then
         die $LINENO "ir-api did not start"
@@ -275,7 +385,7 @@
 # start_ironic_conductor() - Used by start_ironic().
 # Starts Ironic conductor.
 function start_ironic_conductor {
-    screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
+    run_process ir-cond "$IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
     # TODO(romcheg): Find a way to check whether the conductor has started.
 }
 
@@ -284,6 +394,11 @@
     # Kill the Ironic screen windows
     screen -S $SCREEN_NAME -p ir-api -X kill
     screen -S $SCREEN_NAME -p ir-cond -X kill
+
+    # Cleanup the WSGI files
+    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+        _cleanup_ironic_apache_wsgi
+    fi
 }
 
 function is_ironic {
@@ -293,58 +408,72 @@
     return 1
 }
 
-function configure_ironic_dirs {
-    sudo mkdir -p $IRONIC_DATA_DIR
-    sudo mkdir -p $IRONIC_STATE_PATH
-    sudo mkdir -p $IRONIC_TFTPBOOT_DIR
-    sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH
-    sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR
-    if is_ubuntu; then
-        PXEBIN=/usr/lib/syslinux/pxelinux.0
-    elif is_fedora; then
-        PXEBIN=/usr/share/syslinux/pxelinux.0
-    fi
-    if [ ! -f $PXEBIN ]; then
-        die $LINENO "pxelinux.0 (from SYSLINUX) not found."
-    fi
+function create_ovs_taps {
+    local ironic_net_id=$(neutron net-list | grep private | get_field 1)
 
-    cp $PXEBIN $IRONIC_TFTPBOOT_DIR
-    mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
+    # Workaround: no netns exists on the host until a Neutron port is created.
+    # We need to create one in Neutron to know which netns to tap into before
+    # the first node boots.
+    local port_id=$(neutron port-create private | grep " id " | get_field 2)
+
+    # Intentional sleep to make sure the tag has been set on the port
+    sleep 10
+
+    local tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep tap | cut -d':' -f2 | cut -b2-)
+    local tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
+
+    # Make sure the veth pair does not already exist; if it does, delete its links
+    sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
+    sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1
+    # create veth pair for future interconnection between br-int and brbm
+    sudo ip link add brbm-tap1 type veth peer name ovs-tap1
+    sudo ip link set dev brbm-tap1 up
+    sudo ip link set dev ovs-tap1 up
+
+    sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$tag_id
+    sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1
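+
+    # Resulting topology (sketch):
+    #   br-int -- ovs-tap1 <==veth==> brbm-tap1 -- $IRONIC_VM_NETWORK_BRIDGE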
+
+    # Remove the port needed only for workaround.
+    neutron port-delete $port_id
+
+    # Finally, share the fixed tenant network across all tenants.  This allows the host
+    # to serve TFTP to a single network namespace via the tap device created above.
+    neutron net-update $ironic_net_id --shared true
 }
 
 function create_bridge_and_vms {
     # Call libvirt setup scripts in a new shell to ensure any new group membership
     sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network"
     if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
-        LOG_ARG="$IRONIC_VM_LOG_DIR"
+        local log_arg="$IRONIC_VM_LOG_DIR"
     else
-        LOG_ARG=""
+        local log_arg=""
     fi
     sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-nodes \
         $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \
         amd64 $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR \
-        $LOG_ARG" >> $IRONIC_VM_MACS_CSV_FILE
+        $log_arg" >> $IRONIC_VM_MACS_CSV_FILE
+    create_ovs_taps
 }
 
 function enroll_vms {
-
-    CHASSIS_ID=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
-    IRONIC_NET_ID=$(neutron net-list | grep private | get_field 1)
+    local chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
     local idx=0
 
-    # work around; need to know what netns neutron uses for private network.
-    # Without knowing how to interconnect the networks, PXE won't work properly
-    # for fake baremetal instances. The network should be configured prior all
-    # the instances operation. If we don't do this, the first port creation
-    # only happens in the middle of fake baremetal instance's spawning by nova,
-    # so we'll end up with unbootable fake baremetal VM due to broken PXE.
-    PORT_ID=$(neutron port-create private | grep " id " | get_field 2)
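+    # The chosen deploy driver determines which driver_info keys carry the
+    # deploy kernel/ramdisk image IDs.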
+    if [[ "$IRONIC_DEPLOY_DRIVER" == "pxe_ssh" ]] ; then
+        local _IRONIC_DEPLOY_KERNEL_KEY=pxe_deploy_kernel
+        local _IRONIC_DEPLOY_RAMDISK_KEY=pxe_deploy_ramdisk
+    elif [[ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]] ; then
+        local _IRONIC_DEPLOY_KERNEL_KEY=deploy_kernel
+        local _IRONIC_DEPLOY_RAMDISK_KEY=deploy_ramdisk
+    fi
 
     while read MAC; do
 
-        NODE_ID=$(ironic node-create --chassis_uuid $CHASSIS_ID --driver pxe_ssh \
-            -i pxe_deploy_kernel=$IRONIC_DEPLOY_KERNEL_ID \
-            -i pxe_deploy_ramdisk=$IRONIC_DEPLOY_RAMDISK_ID \
+        local node_id=$(ironic node-create --chassis_uuid $chassis_id \
+            --driver $IRONIC_DEPLOY_DRIVER \
+            -i $_IRONIC_DEPLOY_KERNEL_KEY=$IRONIC_DEPLOY_KERNEL_ID \
+            -i $_IRONIC_DEPLOY_RAMDISK_KEY=$IRONIC_DEPLOY_RAMDISK_ID \
             -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \
             -i ssh_address=$IRONIC_VM_SSH_ADDRESS \
             -i ssh_port=$IRONIC_VM_SSH_PORT \
@@ -356,40 +485,23 @@
             -p cpu_arch=x86_64 \
             | grep " uuid " | get_field 2)
 
-        ironic port-create --address $MAC --node_uuid $NODE_ID
+        ironic port-create --address $MAC --node_uuid $node_id
 
         idx=$((idx+1))
-
     done < $IRONIC_VM_MACS_CSV_FILE
 
     # create the nova flavor
-    adjusted_disk=$(($IRONIC_VM_SPECS_DISK - $IRONIC_VM_EPHEMERAL_DISK))
-    nova flavor-create --ephemeral $IRONIC_VM_EPHEMERAL_DISK baremetal auto $IRONIC_VM_SPECS_RAM $adjusted_disk $IRONIC_VM_SPECS_CPU
+    # NOTE(adam_g): Attempting to use an autogenerated UUID for the flavor id
+    # here uncovered a bug (LP: #1333852) in Trove.  This can be changed back to
+    # an auto flavor id once the bug is fixed in Juno.
+    local adjusted_disk=$(($IRONIC_VM_SPECS_DISK - $IRONIC_VM_EPHEMERAL_DISK))
+    nova flavor-create --ephemeral $IRONIC_VM_EPHEMERAL_DISK baremetal 551 $IRONIC_VM_SPECS_RAM $adjusted_disk $IRONIC_VM_SPECS_CPU
+
     # TODO(lucasagomes): Remove the 'baremetal:deploy_kernel_id'
     # and 'baremetal:deploy_ramdisk_id' parameters
     # from the flavor after the completion of
     # https://blueprints.launchpad.net/ironic/+spec/add-node-instance-info
     nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$IRONIC_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$IRONIC_DEPLOY_RAMDISK_ID"
-
-    # intentional sleep to make sure the tag has been set to port
-    sleep 10
-    TAPDEV=$(sudo ip netns exec qdhcp-${IRONIC_NET_ID} ip link list | grep tap | cut -d':' -f2 | cut -b2-)
-    TAG_ID=$(sudo ovs-vsctl show |grep ${TAPDEV} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
-
-    # make sure veth pair is not existing, otherwise delete its links
-    sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
-    sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1
-    # create veth pair for future interconnection between br-int and brbm
-    sudo ip link add brbm-tap1 type veth peer name ovs-tap1
-    sudo ip link set dev brbm-tap1 up
-    sudo ip link set dev ovs-tap1 up
-
-    sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$TAG_ID
-    sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1
-
-    # Remove the port needed only for workaround. For additional info read the
-    # comment at the beginning of this function
-    neutron port-delete $PORT_ID
 }
 
 function configure_iptables {
@@ -402,15 +514,6 @@
 }
 
 function configure_tftpd {
-    if is_ubuntu; then
-        PXEBIN=/usr/lib/syslinux/pxelinux.0
-    elif is_fedora; then
-        PXEBIN=/usr/share/syslinux/pxelinux.0
-    fi
-    if [ ! -f $PXEBIN ]; then
-        die $LINENO "pxelinux.0 (from SYSLINUX) not found."
-    fi
-
     # stop tftpd and setup serving via xinetd
     stop_service tftpd-hpa || true
     [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override
@@ -440,33 +543,51 @@
 }
 
 function ironic_ssh_check {
-    local KEY_FILE=$1
-    local FLOATING_IP=$2
-    local PORT=$3
-    local DEFAULT_INSTANCE_USER=$4
-    local ACTIVE_TIMEOUT=$5
-    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -p $PORT -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then
+    local key_file=$1
+    local floating_ip=$2
+    local port=$3
+    local default_instance_user=$4
+    local active_timeout=$5
+    if ! timeout $active_timeout sh -c "while ! ssh -p $port -o StrictHostKeyChecking=no -i $key_file ${default_instance_user}@$floating_ip echo success; do sleep 1; done"; then
         die $LINENO "server didn't become ssh-able!"
     fi
 }
 
 function configure_ironic_auxiliary {
-    configure_ironic_dirs
     configure_ironic_ssh_keypair
     ironic_ssh_check $IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10
 }
 
+function build_ipa_coreos_ramdisk {
+    echo "Building ironic-python-agent deploy ramdisk"
+    local kernel_path=$1
+    local ramdisk_path=$2
+    git_clone $IRONIC_PYTHON_AGENT_REPO $IRONIC_PYTHON_AGENT_DIR $IRONIC_PYTHON_AGENT_BRANCH
+    cd $IRONIC_PYTHON_AGENT_DIR
+    imagebuild/coreos/build_coreos_image.sh
+    cp imagebuild/coreos/UPLOAD/coreos_production_pxe_image-oem.cpio.gz $ramdisk_path
+    cp imagebuild/coreos/UPLOAD/coreos_production_pxe.vmlinuz $kernel_path
+    sudo rm -rf imagebuild/coreos/UPLOAD
+    cd -
+}
+
 # build deploy kernel+ramdisk, then upload them to glance
-# this function sets IRONIC_DEPLOY_KERNEL_ID and IRONIC_DEPLOY_RAMDISK_ID
+# this function sets ``IRONIC_DEPLOY_KERNEL_ID`` and ``IRONIC_DEPLOY_RAMDISK_ID``
 function upload_baremetal_ironic_deploy {
-    token=$1
+    declare -g IRONIC_DEPLOY_KERNEL_ID IRONIC_DEPLOY_RAMDISK_ID
+    echo_summary "Creating and uploading baremetal images for ironic"
+
+    # install diskimage-builder
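+    # (pip-installed only if ramdisk-image-create is not already on $PATH)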
+    if [[ $(type -P ramdisk-image-create) == "" ]]; then
+        pip_install diskimage_builder
+    fi
 
     if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" ]; then
-        IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy.kernel
-        IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy.initramfs
+        local IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy.kernel
+        local IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy.initramfs
     else
-        IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL
-        IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK
+        local IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL
+        local IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK
     fi
 
     if [ ! -e "$IRONIC_DEPLOY_RAMDISK_PATH" -o ! -e "$IRONIC_DEPLOY_KERNEL_PATH" ]; then
@@ -474,70 +595,63 @@
         if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
             # we can build them only if we're not offline
             if [ "$OFFLINE" != "True" ]; then
-                $DIB_DIR/bin/ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
-                    -o $TOP_DIR/files/ir-deploy
+                if [ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]; then
+                    build_ipa_coreos_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH
+                else
+                    ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
+                        -o $TOP_DIR/files/ir-deploy
+                fi
             else
                 die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be build in OFFLINE mode"
             fi
         else
-            die $LINENO "Deploy kernel+ramdisk files don't exist and their building was disabled explicitly by IRONIC_BUILD_DEPLOY_RAMDISK"
+            if [ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]; then
+                # download the agent image tarball
+                wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL_PATH
+                wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK_PATH
+            else
+                die $LINENO "Deploy kernel+ramdisk files don't exist and their building was disabled explicitly by IRONIC_BUILD_DEPLOY_RAMDISK"
+            fi
         fi
     fi
 
+    local token=$(keystone token-get | grep ' id ' | get_field 2)
+    die_if_not_set $LINENO token "Keystone failed to get token"
+
     # load them into glance
-    IRONIC_DEPLOY_KERNEL_ID=$(glance \
-        --os-auth-token $token \
-        --os-image-url http://$GLANCE_HOSTPORT \
-        image-create \
-        --name $(basename $IRONIC_DEPLOY_KERNEL_PATH) \
-        --is-public True --disk-format=aki \
+    IRONIC_DEPLOY_KERNEL_ID=$(openstack \
+        --os-token $token \
+        --os-url http://$GLANCE_HOSTPORT \
+        image create \
+        $(basename $IRONIC_DEPLOY_KERNEL_PATH) \
+        --public --disk-format=aki \
+        --container-format=aki \
         < $IRONIC_DEPLOY_KERNEL_PATH  | grep ' id ' | get_field 2)
-    IRONIC_DEPLOY_RAMDISK_ID=$(glance \
-        --os-auth-token $token \
-        --os-image-url http://$GLANCE_HOSTPORT \
-        image-create \
-        --name $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \
-        --is-public True --disk-format=ari \
+    IRONIC_DEPLOY_RAMDISK_ID=$(openstack \
+        --os-token $token \
+        --os-url http://$GLANCE_HOSTPORT \
+        image create \
+        $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \
+        --public --disk-format=ari \
+        --container-format=ari \
         < $IRONIC_DEPLOY_RAMDISK_PATH  | grep ' id ' | get_field 2)
 }
 
 function prepare_baremetal_basic_ops {
-
-    # install diskimage-builder
-    git_clone $DIB_REPO $DIB_DIR $DIB_BRANCH
-
-    # make sure all needed service were enabled
-    for srv in nova glance key neutron; do
-        if ! is_service_enabled "$srv"; then
-            die $LINENO "$srv should be enabled for ironic tests"
-        fi
-    done
-
-    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
-    die_if_not_set $LINENO TOKEN "Keystone fail to get token"
-
-    echo_summary "Creating and uploading baremetal images for ironic"
-
-    # build and upload separate deploy kernel & ramdisk
-    upload_baremetal_ironic_deploy $TOKEN
-
+    upload_baremetal_ironic_deploy
     create_bridge_and_vms
     enroll_vms
     configure_tftpd
     configure_iptables
-
-    # restart nova-compute to ensure its resource tracking is up to
-    # date with newly enrolled nodes
-    stop_nova_compute || true
-    start_nova_compute
+    configure_ironic_auxiliary
 }
 
 function cleanup_baremetal_basic_ops {
     rm -f $IRONIC_VM_MACS_CSV_FILE
     if [ -f $IRONIC_KEY_FILE ]; then
-        KEY=`cat $IRONIC_KEY_FILE.pub`
+        local key=$(cat $IRONIC_KEY_FILE.pub)
         # remove public key from authorized_keys
-        grep -v "$KEY" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
+        grep -v "$key" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
         chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE
     fi
     sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH
diff --git a/lib/keystone b/lib/keystone
index 4e94bad..1c67835 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -6,6 +6,7 @@
 # - ``functions`` file
 # - ``tls`` file
 # - ``DEST``, ``STACK_USER``
+# - ``FILES``
 # - ``IDENTITY_API_VERSION``
 # - ``BASE_SQL_CONN``
 # - ``SERVICE_HOST``, ``SERVICE_PROTOCOL``
@@ -37,7 +38,11 @@
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
 KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
 KEYSTONE_AUTH_CACHE_DIR=${KEYSTONE_AUTH_CACHE_DIR:-/var/cache/keystone}
-KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/var/www/keystone}
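+# openSUSE's Apache serves content from /srv/www/htdocs rather than /var/www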
+if is_suse; then
+    KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/srv/www/htdocs/keystone}
+else
+    KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/var/www/keystone}
+fi
 
 KEYSTONEMIDDLEWARE_DIR=$DEST/keystonemiddleware
 KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
@@ -90,7 +95,7 @@
 KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql
 
 # if we are running with SSL use https protocols
-if is_ssl_enabled_service "key"; then
+if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
     KEYSTONE_AUTH_PROTOCOL="https"
     KEYSTONE_SERVICE_PROTOCOL="https"
 fi
@@ -104,18 +109,13 @@
 # cleanup_keystone() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_keystone {
-    # kill instances (nova)
-    # delete image files (glance)
-    # This function intentionally left blank
-    :
+    _cleanup_keystone_apache_wsgi
 }
 
 # _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
 function _cleanup_keystone_apache_wsgi {
     sudo rm -f $KEYSTONE_WSGI_DIR/*.wsgi
-    disable_apache_site keystone
     sudo rm -f $(apache_site_config_for keystone)
-    restart_apache_server
 }
 
 # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
@@ -123,6 +123,21 @@
     sudo mkdir -p $KEYSTONE_WSGI_DIR
 
     local keystone_apache_conf=$(apache_site_config_for keystone)
+    local keystone_ssl=""
+    local keystone_certfile=""
+    local keystone_keyfile=""
+    local keystone_service_port=$KEYSTONE_SERVICE_PORT
+    local keystone_auth_port=$KEYSTONE_AUTH_PORT
+
+    if is_ssl_enabled_service key; then
+        keystone_ssl="SSLEngine On"
+        keystone_certfile="SSLCertificateFile $KEYSTONE_SSL_CERT"
+        keystone_keyfile="SSLCertificateKeyFile $KEYSTONE_SSL_KEY"
+    fi
+    if is_service_enabled tls-proxy; then
+        keystone_service_port=$KEYSTONE_SERVICE_PORT_INT
+        keystone_auth_port=$KEYSTONE_AUTH_PORT_INT
+    fi
 
     # copy proxy vhost and wsgi file
     sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/main
@@ -130,14 +145,16 @@
 
     sudo cp $FILES/apache-keystone.template $keystone_apache_conf
     sudo sed -e "
-        s|%PUBLICPORT%|$KEYSTONE_SERVICE_PORT|g;
-        s|%ADMINPORT%|$KEYSTONE_AUTH_PORT|g;
+        s|%PUBLICPORT%|$keystone_service_port|g;
+        s|%ADMINPORT%|$keystone_auth_port|g;
         s|%APACHE_NAME%|$APACHE_NAME|g;
         s|%PUBLICWSGI%|$KEYSTONE_WSGI_DIR/main|g;
         s|%ADMINWSGI%|$KEYSTONE_WSGI_DIR/admin|g;
+        s|%SSLENGINE%|$keystone_ssl|g;
+        s|%SSLCERTFILE%|$keystone_certfile|g;
+        s|%SSLKEYFILE%|$keystone_keyfile|g;
         s|%USER%|$STACK_USER|g
     " -i $keystone_apache_conf
-    enable_apache_site keystone
 }
 
 # configure_keystone() - Set config files, create data dirs, etc
@@ -200,8 +217,13 @@
     fi
 
     # Set the URL advertised in the ``versions`` structure returned by the '/' route
-    iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/"
-    iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/"
+    if is_service_enabled tls-proxy; then
+        iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/"
+        iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/"
+    else
+        iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/"
+        iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/"
+    fi
     iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST"
 
     # Register SSL certificates if provided
@@ -229,11 +251,11 @@
     iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
 
     if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then
-        iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token
+        iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.sql.Token
     elif [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then
-        iniset $KEYSTONE_CONF token driver keystone.token.backends.memcache.Token
+        iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.memcache.Token
     else
-        iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token
+        iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.kvs.Token
     fi
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
@@ -280,7 +302,7 @@
     fi
 
     if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
-        iniset $KEYSTONE_CONF DEFAULT debug "True"
+        iniset $KEYSTONE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         # Eliminate the %(asctime)s.%(msecs)03d from the log format strings
         iniset $KEYSTONE_CONF DEFAULT logging_context_format_string "%(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s"
         iniset $KEYSTONE_CONF DEFAULT logging_default_format_string "%(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s"
@@ -290,6 +312,13 @@
     fi
 
     iniset $KEYSTONE_CONF DEFAULT max_token_size 16384
+
+    iniset $KEYSTONE_CONF DEFAULT admin_workers "$API_WORKERS"
+    # Public workers will use the server default, typically number of CPU.
+
+    if [[ -n "$KEYSTONE_TOKEN_HASH_ALGORITHM" ]]; then
+        iniset $KEYSTONE_CONF token hash_algorithm "$KEYSTONE_TOKEN_HASH_ALGORITHM"
+    fi
 }
 
 function configure_keystone_extensions {
@@ -328,11 +357,11 @@
 function create_keystone_accounts {
 
     # admin
-    ADMIN_TENANT=$(get_or_create_project "admin")
-    ADMIN_USER=$(get_or_create_user "admin" \
-        "$ADMIN_PASSWORD" "$ADMIN_TENANT" "admin@example.com")
-    ADMIN_ROLE=$(get_or_create_role "admin")
-    get_or_add_user_role $ADMIN_ROLE $ADMIN_USER $ADMIN_TENANT
+    local admin_tenant=$(get_or_create_project "admin")
+    local admin_user=$(get_or_create_user "admin" \
+        "$ADMIN_PASSWORD" "$admin_tenant")
+    local admin_role=$(get_or_create_role "admin")
+    get_or_add_user_role $admin_role $admin_user $admin_tenant
 
     # Create service project/role
     get_or_create_project "$SERVICE_TENANT_NAME"
@@ -347,25 +376,24 @@
     get_or_create_role ResellerAdmin
 
     # The Member role is used by Horizon and Swift so we need to keep it:
-    MEMBER_ROLE=$(get_or_create_role "Member")
+    local member_role=$(get_or_create_role "Member")
 
-    # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
+    # another_role demonstrates that an arbitrary role may be created and used
     # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
-
-    ANOTHER_ROLE=$(get_or_create_role "anotherrole")
+    local another_role=$(get_or_create_role "anotherrole")
 
     # invisible tenant - admin can't see this one
-    INVIS_TENANT=$(get_or_create_project "invisible_to_admin")
+    local invis_tenant=$(get_or_create_project "invisible_to_admin")
 
     # demo
-    DEMO_TENANT=$(get_or_create_project "demo")
-    DEMO_USER=$(get_or_create_user "demo" \
-        "$ADMIN_PASSWORD" "$DEMO_TENANT" "demo@example.com")
+    local demo_tenant=$(get_or_create_project "demo")
+    local demo_user=$(get_or_create_user "demo" \
+        "$ADMIN_PASSWORD" "$demo_tenant" "demo@example.com")
 
-    get_or_add_user_role $MEMBER_ROLE $DEMO_USER $DEMO_TENANT
-    get_or_add_user_role $ADMIN_ROLE $ADMIN_USER $DEMO_TENANT
-    get_or_add_user_role $ANOTHER_ROLE $DEMO_USER $DEMO_TENANT
-    get_or_add_user_role $MEMBER_ROLE $DEMO_USER $INVIS_TENANT
+    get_or_add_user_role $member_role $demo_user $demo_tenant
+    get_or_add_user_role $admin_role $admin_user $demo_tenant
+    get_or_add_user_role $another_role $demo_user $demo_tenant
+    get_or_add_user_role $member_role $demo_user $invis_tenant
 
     # Keystone
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
@@ -381,11 +409,40 @@
 }
 
 # Configure the API version for the OpenStack projects.
-# configure_API_version conf_file version
+# configure_API_version conf_file version [section]
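+#
+# For example (illustrative):
+#   configure_API_version $NOVA_CONF $IDENTITY_API_VERSION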
 function configure_API_version {
     local conf_file=$1
     local api_version=$2
-    iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version
+    local section=${3:-keystone_authtoken}
+    iniset $conf_file $section auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version
+}
+
+# Configure the service to use the auth token middleware.
+#
+# configure_auth_token_middleware conf_file admin_user signing_dir [section]
+#
+# section defaults to keystone_authtoken, which is where auth_token looks in
+# the .conf file. If the paste config file is used (api-paste.ini) then
+# provide the section name for the auth_token filter.
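+#
+# Example call (illustrative; section defaults to keystone_authtoken):
+#   configure_auth_token_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR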
+function configure_auth_token_middleware {
+    local conf_file=$1
+    local admin_user=$2
+    local signing_dir=$3
+    local section=${4:-keystone_authtoken}
+
+    iniset $conf_file $section auth_host $KEYSTONE_AUTH_HOST
+    iniset $conf_file $section auth_port $KEYSTONE_AUTH_PORT
+    iniset $conf_file $section auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $conf_file $section identity_uri $KEYSTONE_AUTH_URI
+    iniset $conf_file $section cafile $SSL_BUNDLE_FILE
+    configure_API_version $conf_file $IDENTITY_API_VERSION $section
+    iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $conf_file $section admin_user $admin_user
+    iniset $conf_file $section admin_password $SERVICE_PASSWORD
+    iniset $conf_file $section signing_dir $signing_dir
+    if [[ -n "$KEYSTONE_TOKEN_HASH_ALGORITHM" ]]; then
+        iniset $conf_file $section hash_algorithms "$KEYSTONE_TOKEN_HASH_ALGORITHM"
+    fi
 }
 
 # init_keystone() - Initialize databases, etc.
@@ -454,6 +511,9 @@
     setup_develop $KEYSTONE_DIR
     if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
         install_apache_wsgi
+        if is_ssl_enabled_service "key"; then
+            enable_mod_ssl
+        fi
     fi
 }
 
@@ -461,23 +521,31 @@
 function start_keystone {
     # Get right service port for testing
     local service_port=$KEYSTONE_SERVICE_PORT
+    local auth_protocol=$KEYSTONE_AUTH_PROTOCOL
     if is_service_enabled tls-proxy; then
         service_port=$KEYSTONE_SERVICE_PORT_INT
+        auth_protocol="http"
     fi
 
     if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
+        enable_apache_site keystone
         restart_apache_server
-        screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone"
+        tail_log key /var/log/$APACHE_NAME/keystone.log
+        tail_log key-access /var/log/$APACHE_NAME/keystone_access.log
     else
+        local EXTRA_PARAMS=""
+        if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then
+            EXTRA_PARAMS="--debug"
+        fi
         # Start Keystone in a screen window
-        screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
+        run_process key "$KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $EXTRA_PARAMS"
     fi
 
     echo "Waiting for keystone to start..."
     # Check that the keystone service is running. Even if the tls tunnel
     # should be enabled, make sure the internal port is checked using
     # unencrypted traffic at this point.
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s http://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $auth_protocol://$KEYSTONE_SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
         die $LINENO "keystone did not start"
     fi
 
@@ -490,12 +558,17 @@
 
 # stop_keystone() - Stop running processes
 function stop_keystone {
+    if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
+        disable_apache_site keystone
+        restart_apache_server
+    fi
     # Kill the Keystone screen window
-    screen_stop key
-    # Cleanup the WSGI files and VHOST
-    _cleanup_keystone_apache_wsgi
+    stop_process key
 }
 
+function is_keystone_enabled {
+    is_service_enabled key
+}
 
 # Restore xtrace
 $XTRACE
diff --git a/lib/ldap b/lib/ldap
index efe2f09..2bb8a4c 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -79,7 +79,7 @@
 function init_ldap {
     local keystone_ldif
 
-    TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+    local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
 
     # Remove data but not schemas
     clear_ldap_state
@@ -91,17 +91,17 @@
         printf "Configuring LDAP for $LDAP_BASE_DC\n"
         # If BASE_DN is changed, the user may override the default file
         if [[ -r $FILES/ldap/${LDAP_BASE_DC}.ldif.in ]]; then
-            keystone_ldif=${LDAP_BASE_DC}.ldif
+            local keystone_ldif=${LDAP_BASE_DC}.ldif
         else
-            keystone_ldif=keystone.ldif
+            local keystone_ldif=keystone.ldif
         fi
-        _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$TMP_LDAP_DIR/${keystone_ldif}
-        if [[ -r $TMP_LDAP_DIR/${keystone_ldif} ]]; then
-            ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $TMP_LDAP_DIR/${keystone_ldif}
+        _ldap_varsubst $FILES/ldap/${keystone_ldif}.in >$tmp_ldap_dir/${keystone_ldif}
+        if [[ -r $tmp_ldap_dir/${keystone_ldif} ]]; then
+            ldapadd -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -c -f $tmp_ldap_dir/${keystone_ldif}
         fi
     fi
 
-    rm -rf TMP_LDAP_DIR
+    rm -rf $tmp_ldap_dir
 }
 
 # install_ldap
@@ -110,7 +110,7 @@
     echo "Installing LDAP inside function"
     echo "os_VENDOR is $os_VENDOR"
 
-    TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+    local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
 
     printf "installing OpenLDAP"
     if is_ubuntu; then
@@ -119,19 +119,19 @@
     elif is_fedora; then
         start_ldap
     elif is_suse; then
-        _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$TMP_LDAP_DIR/suse-base-config.ldif
-        sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $TMP_LDAP_DIR/suse-base-config.ldif
+        _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$tmp_ldap_dir/suse-base-config.ldif
+        sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $tmp_ldap_dir/suse-base-config.ldif
         sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap
         start_ldap
     fi
 
     echo "LDAP_PASSWORD is $LDAP_PASSWORD"
-    SLAPPASS=$(slappasswd -s $LDAP_PASSWORD)
-    printf "LDAP secret is $SLAPPASS\n"
+    local slappass=$(slappasswd -s $LDAP_PASSWORD)
+    printf "LDAP secret is $slappass\n"
 
     # Create manager.ldif and add to olcdb
-    _ldap_varsubst $FILES/ldap/manager.ldif.in >$TMP_LDAP_DIR/manager.ldif
-    sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_LDAP_DIR/manager.ldif
+    _ldap_varsubst $FILES/ldap/manager.ldif.in >$tmp_ldap_dir/manager.ldif
+    sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $tmp_ldap_dir/manager.ldif
 
     # On fedora we need to manually add cosine and inetorgperson schemas
     if is_fedora; then
@@ -139,7 +139,7 @@
         sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif
     fi
 
-    rm -rf TMP_LDAP_DIR
+    rm -rf $tmp_ldap_dir
 }
 
 # start_ldap() - Start LDAP
diff --git a/lib/marconi b/lib/marconi
deleted file mode 100644
index d7822c9..0000000
--- a/lib/marconi
+++ /dev/null
@@ -1,204 +0,0 @@
-# lib/marconi
-# Install and start **Marconi** service
-
-# To enable a minimal set of Marconi services, add the following to localrc:
-#
-#     enable_service marconi-server
-#
-# Dependencies:
-# - functions
-# - OS_AUTH_URL for auth in api
-# - DEST set to the destination directory
-# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
-# - STACK_USER service user
-
-# stack.sh
-# ---------
-# install_marconi
-# configure_marconi
-# init_marconi
-# start_marconi
-# stop_marconi
-# cleanup_marconi
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-MARCONI_DIR=$DEST/marconi
-MARCONICLIENT_DIR=$DEST/python-marconiclient
-MARCONI_CONF_DIR=/etc/marconi
-MARCONI_CONF=$MARCONI_CONF_DIR/marconi.conf
-MARCONI_API_LOG_DIR=/var/log/marconi
-MARCONI_API_LOG_FILE=$MARCONI_API_LOG_DIR/queues.log
-MARCONI_AUTH_CACHE_DIR=${MARCONI_AUTH_CACHE_DIR:-/var/cache/marconi}
-
-# Support potential entry-points console scripts
-MARCONI_BIN_DIR=$(get_python_exec_prefix)
-
-# Set up database backend
-MARCONI_BACKEND=${MARCONI_BACKEND:-sqlite}
-
-
-# Set Marconi repository
-MARCONI_REPO=${MARCONI_REPO:-${GIT_BASE}/openstack/marconi.git}
-MARCONI_BRANCH=${MARCONI_BRANCH:-master}
-
-# Set client library repository
-MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git}
-MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master}
-
-# Set Marconi Connection Info
-MARCONI_SERVICE_HOST=${MARCONI_SERVICE_HOST:-$SERVICE_HOST}
-MARCONI_SERVICE_PORT=${MARCONI_SERVICE_PORT:-8888}
-MARCONI_SERVICE_PROTOCOL=${MARCONI_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,marconi
-
-
-# Functions
-# ---------
-
-# Test if any Marconi services are enabled
-# is_marconi_enabled
-function is_marconi_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"marconi-" ]] && return 0
-    return 1
-}
-
-# cleanup_marconi() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_marconi {
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then
-        die $LINENO "Mongo DB did not start"
-    fi
-}
-
-# configure_marconiclient() - Set config files, create data dirs, etc
-function configure_marconiclient {
-    setup_develop $MARCONICLIENT_DIR
-}
-
-# configure_marconi() - Set config files, create data dirs, etc
-function configure_marconi {
-    setup_develop $MARCONI_DIR
-
-    [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR
-    sudo chown $USER $MARCONI_CONF_DIR
-
-    [ ! -d $MARCONI_API_LOG_DIR ] &&  sudo mkdir -m 755 -p $MARCONI_API_LOG_DIR
-    sudo chown $USER $MARCONI_API_LOG_DIR
-
-    iniset $MARCONI_CONF DEFAULT verbose True
-    iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG
-    iniset $MARCONI_CONF DEFAULT log_file $MARCONI_API_LOG_FILE
-    iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST
-
-    iniset $MARCONI_CONF keystone_authtoken auth_protocol http
-    iniset $MARCONI_CONF keystone_authtoken admin_user marconi
-    iniset $MARCONI_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $MARCONI_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $MARCONI_CONF keystone_authtoken signing_dir $MARCONI_AUTH_CACHE_DIR
-
-    if [ "$MARCONI_BACKEND" = 'mysql' ] || [ "$MARCONI_BACKEND" = 'postgresql' ] ; then
-        iniset $MARCONI_CONF drivers storage sqlalchemy
-        iniset $MARCONI_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url marconi`
-    elif [ "$MARCONI_BACKEND" = 'mongodb' ] ; then
-        iniset $MARCONI_CONF  drivers storage mongodb
-        iniset $MARCONI_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/marconi
-        configure_mongodb
-        cleanup_marconi
-    fi
-}
-
-function configure_mongodb {
-    # Set nssize to 2GB. This increases the number of namespaces supported
-    # # per database.
-    if is_ubuntu; then
-        sudo sed -i -e "
-            s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1|
-            s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047|
-        " /etc/mongodb.conf
-        restart_service mongodb
-    elif is_fedora; then
-        sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
-        restart_service mongod
-    fi
-}
-
-# init_marconi() - Initialize etc.
-function init_marconi {
-    # Create cache dir
-    sudo mkdir -p $MARCONI_AUTH_CACHE_DIR
-    sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR
-    rm -f $MARCONI_AUTH_CACHE_DIR/*
-}
-
-# install_marconi() - Collect source and prepare
-function install_marconi {
-    git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH
-    setup_develop $MARCONI_DIR
-}
-
-# install_marconiclient() - Collect source and prepare
-function install_marconiclient {
-    git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH
-    setup_develop $MARCONICLIENT_DIR
-}
-
-# start_marconi() - Start running processes, including screen
-function start_marconi {
-    if [[ "$USE_SCREEN" = "False" ]]; then
-        screen_it marconi-server "marconi-server --config-file $MARCONI_CONF --daemon"
-    else
-        screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
-    fi
-
-    echo "Waiting for Marconi to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then
-        die $LINENO "Marconi did not start"
-    fi
-}
-
-# stop_marconi() - Stop running processes
-function stop_marconi {
-    # Kill the marconi screen windows
-    for serv in marconi-server; do
-        screen -S $SCREEN_NAME -p $serv -X kill
-    done
-}
-
-function create_marconi_accounts {
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
-
-    MARCONI_USER=$(get_or_create_user "marconi" \
-        "$SERVICE_PASSWORD" $SERVICE_TENANT "marconi@example.com")
-    get_or_add_user_role $ADMIN_ROLE $MARCONI_USER $SERVICE_TENANT
-
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-        MARCONI_SERVICE=$(get_or_create_service "marconi" \
-            "queuing" "Marconi Service")
-        get_or_create_endpoint $MARCONI_SERVICE \
-            "$REGION_NAME" \
-            "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \
-            "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \
-            "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT"
-    fi
-
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/neutron b/lib/neutron
index 8b883b1..81f2697 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -69,6 +69,11 @@
 PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"}
 PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
 
+if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then
+    Q_PROTOCOL="https"
+fi
+
+
 # Set up default directories
 NEUTRON_DIR=$DEST/neutron
 NEUTRONCLIENT_DIR=$DEST/python-neutronclient
@@ -85,12 +90,32 @@
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
 
+# Agent binaries.  Note: binary paths for other agents are set in per-service
+# scripts in lib/neutron_plugins/services/
+AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
+AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
+AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
+
+# Agent config files. Note: the plugin-specific Q_PLUGIN_CONF_FILE is set and
+# loaded from per-plugin scripts in lib/neutron_plugins/
+Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
+Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
+Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini
+Q_VPN_CONF_FILE=$NEUTRON_CONF_DIR/vpn_agent.ini
+Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
+
+# Default name for Neutron database
+Q_DB_NAME=${Q_DB_NAME:-neutron}
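+# (individual plugins no longer override this; see lib/neutron_plugins/README.md)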
 # Default Neutron Plugin
 Q_PLUGIN=${Q_PLUGIN:-ml2}
 # Default Neutron Port
 Q_PORT=${Q_PORT:-9696}
+# Default Neutron Internal Port when using TLS proxy
+Q_PORT_INT=${Q_PORT_INT:-19696}
 # Default Neutron Host
 Q_HOST=${Q_HOST:-$SERVICE_HOST}
+# Default protocol
+Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
 # Default admin username
 Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
 # Default auth strategy
@@ -118,6 +143,21 @@
 ## Provider Network Information
 PROVIDER_SUBNET_NAME=${PROVIDER_SUBNET_NAME:-"provider_net"}
 
+# Use flat providernet for public network
+#
+# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a flat provider network
+# for the external interface of the neutron l3-agent.  In that case,
+# PUBLIC_PHYSICAL_NETWORK specifies the provider:physical_network value
+# used for the network.  When using the openvswitch agent, you should
+# add the corresponding entry to your OVS_BRIDGE_MAPPINGS.
+#
+# e.g.
+#    Q_USE_PROVIDERNET_FOR_PUBLIC=True
+#    PUBLIC_PHYSICAL_NETWORK=public
+#    OVS_BRIDGE_MAPPINGS=public:br-ex
+Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False}
+PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public}
+
 # The next two variables are configured by plugin
 # e.g.  _configure_neutron_l3_agent or lib/neutron_plugins/*
 #
@@ -143,6 +183,17 @@
     Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
 fi
 
+
+# Distributed Virtual Router (DVR) configuration
+# Can be:
+#     legacy   - No DVR functionality
+#     dvr_snat - Controller or single node DVR
+#     dvr      - Compute node in multi-node DVR
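+#
+# e.g. for a single-node DVR setup, set in ``local.conf`` (illustrative):
+#     Q_DVR_MODE=dvr_snat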
+Q_DVR_MODE=${Q_DVR_MODE:-legacy}
+if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,l2population
+fi
+
 # Provider Network Configurations
 # --------------------------------
 
@@ -153,10 +204,10 @@
 # remote connectivity), and no physical resources will be
 # available for the allocation of provider networks.
 
-# To use GRE tunnels for tenant networks, set to True in
-# ``localrc``. GRE tunnels are only supported by the openvswitch
-# plugin, and currently only on Ubuntu.
-ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
+# To disable tunnels (GRE or VXLAN) for tenant networks,
+# set to False in ``local.conf``.
+# GRE tunnels are only supported by the openvswitch plugin.
+ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True}
 
 # If using GRE tunnels for tenant networks, specify the range of
 # tunnel IDs from which tenant networks are allocated. Can be
@@ -262,6 +313,51 @@
 # Functions
 # ---------
 
+function _determine_config_server {
+    local cfg_file
+    local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+    for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
+        opts+=" --config-file /$cfg_file"
+    done
+    echo "$opts"
+}
+
+function _determine_config_vpn {
+    local cfg_file
+    local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE --config-file=$Q_VPN_CONF_FILE"
+    if is_service_enabled q-fwaas; then
+        opts+=" --config-file $Q_FWAAS_CONF_FILE"
+    fi
+    for cfg_file in ${Q_VPN_EXTRA_CONF_FILES[@]}; do
+        opts+=" --config-file $cfg_file"
+    done
+    echo "$opts"
+}
+
+function _determine_config_l3 {
+    local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
+    if is_service_enabled q-fwaas; then
+        opts+=" --config-file $Q_FWAAS_CONF_FILE"
+    fi
+    echo "$opts"
+}
+
+# For services and agents that require it, dynamically construct a list of
+# --config-file arguments that are passed to the binary.
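+#
+# e.g. (illustrative):
+#   run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"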
+function determine_config_files {
+    local opts=""
+    case "$1" in
+        "neutron-server") opts="$(_determine_config_server)" ;;
+        "neutron-vpn-agent") opts="$(_determine_config_vpn)" ;;
+        "neutron-l3-agent") opts="$(_determine_config_l3)" ;;
+    esac
+    if [ -z "$opts" ] ; then
+        die $LINENO "Could not determine config files for $1."
+    fi
+    echo "$opts"
+}
+
 # Test if any Neutron services are enabled
 # is_neutron_enabled
 function is_neutron_enabled {
@@ -303,6 +399,10 @@
     if is_service_enabled q-meta; then
         _configure_neutron_metadata_agent
     fi
+
+    if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+        _configure_dvr
+    fi
     if is_service_enabled ceilometer; then
         _configure_neutron_ceilometer_notifications
     fi
@@ -318,7 +418,7 @@
     iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY"
     iniset $NOVA_CONF neutron admin_tenant_name "$SERVICE_TENANT_NAME"
     iniset $NOVA_CONF neutron region_name "$REGION_NAME"
-    iniset $NOVA_CONF neutron url "http://$Q_HOST:$Q_PORT"
+    iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT"
 
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
         LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
@@ -332,7 +432,7 @@
     iniset $NOVA_CONF libvirt vif_driver "$NOVA_VIF_DRIVER"
     iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER"
     if is_service_enabled q-meta; then
-        iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True"
+        iniset $NOVA_CONF neutron service_metadata_proxy "True"
     fi
 
     iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
@@ -356,24 +456,24 @@
 # Migrated from keystone_data.sh
 function create_neutron_accounts {
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
     if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
 
-        NEUTRON_USER=$(get_or_create_user "neutron" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT "neutron@example.com")
-        get_or_add_user_role $ADMIN_ROLE $NEUTRON_USER $SERVICE_TENANT
+        local neutron_user=$(get_or_create_user "neutron" \
+            "$SERVICE_PASSWORD" $service_tenant)
+        get_or_add_user_role $admin_role $neutron_user $service_tenant
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            NEUTRON_SERVICE=$(get_or_create_service "neutron" \
+            local neutron_service=$(get_or_create_service "neutron" \
                 "network" "Neutron Service")
-            get_or_create_endpoint $NEUTRON_SERVICE \
+            get_or_create_endpoint $neutron_service \
                 "$REGION_NAME" \
-                "http://$SERVICE_HOST:9696/" \
-                "http://$SERVICE_HOST:9696/" \
-                "http://$SERVICE_HOST:9696/"
+                "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
+                "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
+                "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/"
         fi
     fi
 }
@@ -401,9 +501,9 @@
         sudo ifconfig $OVS_PHYSICAL_BRIDGE up
         sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
     elif is_provider_network; then
-        die_if_not_set $LINENO SEGMENTATION_ID "A SEGMENTATION_ID is required to use provider networking"
+        die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
         die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
-        NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" --provider:segmentation_id "$SEGMENTATION_ID" --shared | grep ' id ' | get_field 2)
+        NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2)
         SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
         SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode slaac --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2)
         sudo ip link set $OVS_PHYSICAL_BRIDGE up
@@ -429,7 +529,11 @@
         fi
         neutron router-interface-add $ROUTER_ID $SUBNET_ID
         # Create an external network, and a subnet. Configure the external network as router gw
-        EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
+        if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
+            EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+        else
+            EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
+        fi
         die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
         EXT_GW_IP=$(neutron subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
         die_if_not_set $LINENO EXT_GW_IP "Failure creating EXT_GW_IP"
@@ -438,6 +542,9 @@
         if is_service_enabled q-l3; then
             # logic is specific to using the l3-agent for l3
             if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
+                # Disable in-band as we are going to use local port
+                # to communicate with VMs
+                sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE other_config:disable-in-band=true
                 CIDR_LEN=${FLOATING_RANGE#*/}
                 sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE
                 sudo ip link set $PUBLIC_BRIDGE up
@@ -475,6 +582,10 @@
 
 # install_neutron_agent_packages() - Collect source and prepare
 function install_neutron_agent_packages {
+    # radvd doesn't come with the OS. Install it if the l3 service is enabled.
+    if is_service_enabled q-l3; then
+        install_package radvd
+    fi
     # install packages that are specific to plugin agent(s)
     if is_service_enabled q-agt q-dhcp q-l3; then
         neutron_plugin_install_agent_packages
@@ -487,27 +598,33 @@
 
 # Start running processes, including screen
 function start_neutron_service_and_check {
-    # build config-file options
-    local cfg_file
-    local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-    for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
-        CFG_FILE_OPTIONS+=" --config-file /$cfg_file"
-    done
+    local cfg_file_options="$(determine_config_files neutron-server)"
+    local service_port=$Q_PORT
+    local service_protocol=$Q_PROTOCOL
+    if is_service_enabled tls-proxy; then
+        service_port=$Q_PORT_INT
+        service_protocol="http"
+    fi
     # Start the Neutron service
-    screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS"
+    run_process q-svc "python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
     echo "Waiting for Neutron to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then
+    if is_ssl_enabled_service "neutron"; then
+        ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}"
+    fi
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port; do sleep 1; done"; then
         die $LINENO "Neutron did not start"
     fi
+    # Start proxy if enabled
+    if is_service_enabled tls-proxy; then
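+        # Terminate TLS on the public Q_PORT and forward plaintext to Q_PORT_INT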
+        start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT &
+    fi
 }
 
 # Start running processes, including screen
 function start_neutron_agents {
     # Start up the neutron agents if enabled
-    screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-    screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
-
-    L3_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
+    run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+    run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
 
     if is_provider_network; then
         sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
@@ -516,29 +633,25 @@
         sudo ip link set $PUBLIC_INTERFACE up
     fi
 
-    if is_service_enabled q-fwaas; then
-        L3_CONF_FILES="$L3_CONF_FILES --config-file $Q_FWAAS_CONF_FILE"
-        VPN_CONF_FILES="$VPN_CONF_FILES --config-file $Q_FWAAS_CONF_FILE"
-    fi
     if is_service_enabled q-vpn; then
-        screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $VPN_CONF_FILES"
+        run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)"
     else
-        screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY $L3_CONF_FILES"
+        run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
     fi
 
-    screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
+    run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
 
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
         # For XenServer, start an agent for the domU openvswitch
-        screen_it q-domua "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
+        run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
     fi
 
     if is_service_enabled q-lbaas; then
-        screen_it q-lbaas "cd $NEUTRON_DIR && python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+        run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
     fi
 
     if is_service_enabled q-metering; then
-        screen_it q-metering "cd $NEUTRON_DIR && python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
+        run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
     fi
 }
 
@@ -574,7 +687,7 @@
     fi
 
     # delete all namespaces created by neutron
-    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas)-[0-9a-f-]*'); do
+    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
         sudo ip netns delete ${ns}
     done
 }
@@ -608,7 +721,7 @@
     Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
     cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
 
-    iniset /$Q_PLUGIN_CONF_FILE database connection `database_connection_url $Q_DB_NAME`
+    iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
     iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
 
     # If additional config files are set, make sure their path names are set as well
@@ -639,6 +752,23 @@
         setup_colorized_logging $NEUTRON_CONF DEFAULT project_id
     fi
 
+    if is_service_enabled tls-proxy; then
+        # Set the service port for a proxy to take the original
+        iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
+    fi
+
+    if is_ssl_enabled_service "nova"; then
+        iniset $NEUTRON_CONF DEFAULT nova_ca_certificates_file "$SSL_BUNDLE_FILE"
+    fi
+
+    if is_ssl_enabled_service "neutron"; then
+        ensure_certificates NEUTRON
+
+        iniset $NEUTRON_CONF DEFAULT use_ssl True
+        iniset $NEUTRON_CONF DEFAULT ssl_cert_file "$NEUTRON_SSL_CERT"
+        iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY"
+    fi
+
     _neutron_setup_rootwrap
 }
 
@@ -660,8 +790,6 @@
 }
 
 function _configure_neutron_dhcp_agent {
-    AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
-    Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
 
     cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
 
@@ -670,14 +798,6 @@
     iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
 
-    # Define extra "DEFAULT" configuration options when q-dhcp is configured by
-    # defining the array ``Q_DHCP_EXTRA_DEFAULT_OPTS``.
-    # For Example: ``Q_DHCP_EXTRA_DEFAULT_OPTS=(foo=true bar=2)``
-    for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        iniset $Q_DHCP_CONF_FILE DEFAULT ${I/=/ }
-    done
-
     _neutron_setup_interface_driver $Q_DHCP_CONF_FILE
 
     neutron_plugin_configure_dhcp_agent
@@ -689,20 +809,8 @@
     # for l3-agent, only use per tenant router if we have namespaces
     Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
 
-    AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
-    Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
-
-    if is_service_enabled q-fwaas; then
-        Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini
-    fi
-
     if is_service_enabled q-vpn; then
-        Q_VPN_CONF_FILE=$NEUTRON_CONF_DIR/vpn_agent.ini
         cp $NEUTRON_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE
-        VPN_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE --config-file=$Q_VPN_CONF_FILE"
-        for cfg_file in ${Q_VPN_EXTRA_CONF_FILES[@]}; do
-            VPN_CONF_FILES+=" --config-file $cfg_file"
-        done
     fi
 
     cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
@@ -718,9 +826,6 @@
 }
 
 function _configure_neutron_metadata_agent {
-    AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
-    Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
-
     cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE
 
     iniset $Q_META_CONF_FILE DEFAULT verbose True
@@ -728,12 +833,12 @@
     iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
     iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
 
-    _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True True
+    _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT
 
 }
 
 function _configure_neutron_ceilometer_notifications {
-    iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.rpc_notifier
+    iniset $NEUTRON_CONF DEFAULT notification_driver messaging
 }
 
 function _configure_neutron_lbaas {
@@ -756,6 +861,12 @@
     neutron_vpn_configure_common
 }
 
+function _configure_dvr {
+    iniset $NEUTRON_CONF DEFAULT router_distributed True
+    iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE
+}
+
+
 # _configure_neutron_plugin_agent() - Set config files for neutron plugin agent
 # It is called when q-agt is enabled.
 function _configure_neutron_plugin_agent {
@@ -793,14 +904,6 @@
     iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
     _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken
 
-    # Define extra "DEFAULT" configuration options when q-svc is configured by
-    # defining the array ``Q_SRV_EXTRA_DEFAULT_OPTS``.
-    # For Example: ``Q_SRV_EXTRA_DEFAULT_OPTS=(foo=true bar=2)``
-    for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        iniset $NEUTRON_CONF DEFAULT ${I/=/ }
-    done
-
     # Configuration for neutron notifations to nova.
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
@@ -872,19 +975,9 @@
 function _neutron_setup_keystone {
     local conf_file=$1
     local section=$2
-    local use_auth_url=$3
-    local skip_auth_cache=$4
 
-    iniset $conf_file $section auth_uri $KEYSTONE_SERVICE_URI
-    iniset $conf_file $section identity_uri $KEYSTONE_AUTH_URI
-    iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $conf_file $section admin_user $Q_ADMIN_USERNAME
-    iniset $conf_file $section admin_password $SERVICE_PASSWORD
-    if [[ -z $skip_auth_cache ]]; then
-        iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR
-        # Create cache dir
-        create_neutron_cache_dir
-    fi
+    create_neutron_cache_dir
+    configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section
 }
 
 function _neutron_setup_interface_driver {
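
The removed keystone_authtoken boilerplate is replaced by the shared
`configure_auth_token_middleware` helper, called as
`configure_auth_token_middleware <conf> <admin_user> <signing_dir> [section]`.
A minimal sketch of what the helper is assumed to set, reconstructed from the
options the old code wrote (the real helper is defined elsewhere in DevStack,
not in this diff):

    function configure_auth_token_middleware_sketch {
        local conf_file=$1 admin_user=$2 signing_dir=$3 section=${4:-keystone_authtoken}
        iniset $conf_file $section auth_uri $KEYSTONE_SERVICE_URI
        iniset $conf_file $section identity_uri $KEYSTONE_AUTH_URI
        iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME
        iniset $conf_file $section admin_user $admin_user
        iniset $conf_file $section admin_password $SERVICE_PASSWORD
        iniset $conf_file $section signing_dir $signing_dir
    }
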
diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md
index be8fd96..7192a05 100644
--- a/lib/neutron_plugins/README.md
+++ b/lib/neutron_plugins/README.md
@@ -25,7 +25,7 @@
   install_package bridge-utils
 * ``neutron_plugin_configure_common`` :
   set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``,
-  ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``
+  ``Q_PLUGIN_CLASS``
 * ``neutron_plugin_configure_debug_command``
 * ``neutron_plugin_configure_dhcp_agent``
 * ``neutron_plugin_configure_l3_agent``
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index efdd9ef..9e84f2e 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -19,7 +19,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/bigswitch
     Q_PLUGIN_CONF_FILENAME=restproxy.ini
-    Q_DB_NAME="restproxy_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.bigswitch.plugin.NeutronRestProxyV2"
     BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
     BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10}
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index e4cc754..511fb71 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -20,7 +20,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/brocade
     Q_PLUGIN_CONF_FILENAME=brocade.ini
-    Q_DB_NAME="brcd_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2"
 }
 
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index dccf400..1406e37 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -20,38 +20,12 @@
 # Specify the VLAN range
 Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094}
 
-# Specify ncclient package information
-NCCLIENT_DIR=$DEST/ncclient
-NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1}
-NCCLIENT_REPO=${NCCLIENT_REPO:-git://github.com/CiscoSystems/ncclient.git}
-NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master}
-
 # This routine puts a prefix on an existing function name
 function _prefix_function {
     declare -F $1 > /dev/null || die "$1 doesn't exist"
     eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)"
 }
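
`_prefix_function` clones an existing function under a prefixed name: it
re-declares the body emitted by `declare -f` behind `<prefix>_<name>()`. A
tiny standalone sketch (the `greet` helper is hypothetical):

    function greet { echo "hello"; }
    _prefix_function greet ovs
    ovs_greet    # prints "hello"; the original greet remains defined
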
 
-function _has_ovs_subplugin {
-    local subplugin
-    for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
-        if [[ "$subplugin" == "openvswitch" ]]; then
-            return 0
-        fi
-    done
-    return 1
-}
-
-function _has_nexus_subplugin {
-    local subplugin
-    for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
-        if [[ "$subplugin" == "nexus" ]]; then
-            return 0
-        fi
-    done
-    return 1
-}
-
 function _has_n1kv_subplugin {
     local subplugin
     for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
@@ -62,27 +36,6 @@
     return 1
 }
 
-# This routine populates the cisco config file with the information for
-# a particular nexus switch
-function _config_switch {
-    local cisco_cfg_file=$1
-    local switch_ip=$2
-    local username=$3
-    local password=$4
-    local ssh_port=$5
-    shift 5
-
-    local section="NEXUS_SWITCH:$switch_ip"
-    iniset $cisco_cfg_file $section username $username
-    iniset $cisco_cfg_file $section password $password
-    iniset $cisco_cfg_file $section ssh_port $ssh_port
-
-    while [[ ${#@} != 0 ]]; do
-        iniset  $cisco_cfg_file $section $1 $2
-        shift 2
-    done
-}
-
 # Prefix openvswitch plugin routines with "ovs" in order to differentiate from
 # cisco plugin routines. This means, ovs plugin routines will coexist with cisco
 # plugin routines in this script.
@@ -98,73 +51,17 @@
 _prefix_function neutron_plugin_setup_interface_driver ovs
 _prefix_function has_neutron_plugin_security_group ovs
 
-# Check the version of the installed ncclient package
-function check_ncclient_version {
-python << EOF
-version = '$NCCLIENT_VERSION'
-import sys
-try:
-    import pkg_resources
-    import ncclient
-    module_version = pkg_resources.get_distribution('ncclient').version
-    if version != module_version:
-        sys.exit(1)
-except:
-    sys.exit(1)
-EOF
-}
-
-# Install the ncclient package
-function install_ncclient {
-    git_clone $NCCLIENT_REPO $NCCLIENT_DIR $NCCLIENT_BRANCH
-    (cd $NCCLIENT_DIR; sudo python setup.py install)
-}
-
-# Check if the required version of ncclient has been installed
-function is_ncclient_installed {
-    # Check if the Cisco ncclient repository exists
-    if [[ -d $NCCLIENT_DIR ]]; then
-        remotes=$(cd $NCCLIENT_DIR; git remote -v | grep fetch | awk '{ print $2}')
-        for remote in $remotes; do
-            if [[ $remote == $NCCLIENT_REPO ]]; then
-                break;
-            fi
-        done
-        if [[ $remote != $NCCLIENT_REPO ]]; then
-            return 1
-        fi
-    else
-        return 1
-    fi
-
-    # Check if the ncclient is installed with the right version
-    if ! check_ncclient_version; then
-        return 1
-    fi
-    return 0
-}
-
 function has_neutron_plugin_security_group {
-    if _has_ovs_subplugin; then
-        ovs_has_neutron_plugin_security_group
-    else
-        return 1
-    fi
+    return 1
 }
 
 function is_neutron_ovs_base_plugin {
-    # Cisco uses OVS if openvswitch subplugin is deployed
-    _has_ovs_subplugin
     return
 }
 
 # populate required nova configuration parameters
 function neutron_plugin_create_nova_conf {
-    if _has_ovs_subplugin; then
-        ovs_neutron_plugin_create_nova_conf
-    else
-        _neutron_ovs_base_configure_nova_vif_driver
-    fi
+    _neutron_ovs_base_configure_nova_vif_driver
 }
 
 function neutron_plugin_install_agent_packages {
@@ -177,33 +74,14 @@
     # setup default subplugins
     if [ ! -v Q_CISCO_PLUGIN_SUBPLUGINS ]; then
         declare -ga Q_CISCO_PLUGIN_SUBPLUGINS
-        Q_CISCO_PLUGIN_SUBPLUGINS=(openvswitch nexus)
+        Q_CISCO_PLUGIN_SUBPLUGINS=(n1kv)
     fi
-    if _has_ovs_subplugin; then
-        ovs_neutron_plugin_configure_common
-        Q_PLUGIN_EXTRA_CONF_PATH=etc/neutron/plugins/cisco
-        Q_PLUGIN_EXTRA_CONF_FILES=(cisco_plugins.ini)
-        # Copy extra config files to /etc so that they can be modified
-        # later according to Cisco-specific localrc settings.
-        mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH
-        local f
-        local extra_conf_file
-        for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do
-            extra_conf_file=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
-            cp $NEUTRON_DIR/$extra_conf_file /$extra_conf_file
-        done
-    else
-        Q_PLUGIN_CONF_PATH=etc/neutron/plugins/cisco
-        Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini
-    fi
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/cisco
+    Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini
     Q_PLUGIN_CLASS="neutron.plugins.cisco.network_plugin.PluginV2"
-    Q_DB_NAME=cisco_neutron
 }
 
 function neutron_plugin_configure_debug_command {
-    if _has_ovs_subplugin; then
-        ovs_neutron_plugin_configure_debug_command
-    fi
 }
 
 function neutron_plugin_configure_dhcp_agent {
@@ -211,53 +89,6 @@
 }
 
 function neutron_plugin_configure_l3_agent {
-    if _has_ovs_subplugin; then
-        ovs_neutron_plugin_configure_l3_agent
-    fi
-}
-
-function _configure_nexus_subplugin {
-    local cisco_cfg_file=$1
-
-    # Install a known compatible ncclient from the Cisco repository if necessary
-    if ! is_ncclient_installed; then
-        # Preserve the two global variables
-        local offline=$OFFLINE
-        local reclone=$RECLONE
-        # Change their values to allow installation
-        OFFLINE=False
-        RECLONE=yes
-        install_ncclient
-        # Restore their values
-        OFFLINE=$offline
-        RECLONE=$reclone
-    fi
-
-    # Setup default nexus switch information
-    if [ ! -v Q_CISCO_PLUGIN_SWITCH_INFO ]; then
-        declare -A Q_CISCO_PLUGIN_SWITCH_INFO
-        HOST_NAME=$(hostname)
-        Q_CISCO_PLUGIN_SWITCH_INFO=([1.1.1.1]=stack:stack:22:${HOST_NAME}:1/10)
-    else
-        iniset $cisco_cfg_file CISCO nexus_driver neutron.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver
-    fi
-
-    # Setup the switch configurations
-    local nswitch
-    local sw_info
-    local segment
-    local sw_info_array
-    declare -i count=0
-    for nswitch in ${!Q_CISCO_PLUGIN_SWITCH_INFO[@]}; do
-        sw_info=${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]}
-        sw_info_array=${sw_info//:/ }
-        sw_info_array=( $sw_info_array )
-        count=${#sw_info_array[@]}
-        if [[ $count < 5 || $(( ($count-3) % 2 )) != 0 ]]; then
-            die $LINENO "Incorrect switch configuration: ${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]}"
-        fi
-        _config_switch $cisco_cfg_file $nswitch ${sw_info_array[@]}
-    done
 }
 
 # Configure n1kv plugin
@@ -280,48 +111,29 @@
 }
 
 function neutron_plugin_configure_plugin_agent {
-    if _has_ovs_subplugin; then
-        ovs_neutron_plugin_configure_plugin_agent
-    fi
 }
 
 function neutron_plugin_configure_service {
     local subplugin
     local cisco_cfg_file
 
-    if _has_ovs_subplugin; then
-        ovs_neutron_plugin_configure_service
-        cisco_cfg_file=/${Q_PLUGIN_EXTRA_CONF_FILES[0]}
-    else
-        cisco_cfg_file=/$Q_PLUGIN_CONF_FILE
-    fi
+    cisco_cfg_file=/$Q_PLUGIN_CONF_FILE
 
     # Setup the [CISCO_PLUGINS] section
     if [[ ${#Q_CISCO_PLUGIN_SUBPLUGINS[@]} > 2 ]]; then
         die $LINENO "At most two subplugins are supported."
     fi
 
-    if _has_ovs_subplugin && _has_n1kv_subplugin; then
-        die $LINENO "OVS subplugin and n1kv subplugin cannot coexist"
-    fi
-
     # Setup the subplugins
-    inicomment $cisco_cfg_file CISCO_PLUGINS nexus_plugin
     inicomment $cisco_cfg_file CISCO_PLUGINS vswitch_plugin
     inicomment $cisco_cfg_file CISCO_TEST host
     for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
         case $subplugin in
-            nexus) iniset $cisco_cfg_file CISCO_PLUGINS nexus_plugin neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin;;
-            openvswitch) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2;;
             n1kv) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2;;
             *) die $LINENO "Unsupported cisco subplugin: $subplugin";;
         esac
     done
 
-    if _has_nexus_subplugin; then
-        _configure_nexus_subplugin $cisco_cfg_file
-    fi
-
     if _has_n1kv_subplugin; then
         _configure_n1kv_subplugin $cisco_cfg_file
     fi
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
index cce108a..7dafdc0 100644
--- a/lib/neutron_plugins/embrane
+++ b/lib/neutron_plugins/embrane
@@ -18,7 +18,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane
     Q_PLUGIN_CONF_FILENAME=heleos_conf.ini
-    Q_DB_NAME="ovs_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin"
 }
 
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
index 3aef9d0..39b0040 100644
--- a/lib/neutron_plugins/ibm
+++ b/lib/neutron_plugins/ibm
@@ -60,7 +60,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm
     Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini
-    Q_DB_NAME="sdnve_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2"
 }
 
diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge
index 113a7df..5f989ae 100644
--- a/lib/neutron_plugins/linuxbridge
+++ b/lib/neutron_plugins/linuxbridge
@@ -10,7 +10,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/linuxbridge
     Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
-    Q_DB_NAME="neutron_linux_bridge"
     Q_PLUGIN_CLASS="neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2"
 }
 
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index 82b5fc9..2638dd3 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -47,6 +47,7 @@
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
     fi
     AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent"
+    iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES
     # Define extra "AGENT" configuration options when q-agt is configured by defining
     # the array ``Q_AGENT_EXTRA_AGENT_OPTS``.
     # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)``
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index c5373d6..6ccd502 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -26,7 +26,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet
     Q_PLUGIN_CONF_FILENAME=midonet.ini
-    Q_DB_NAME="neutron_midonet"
     Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2"
 }
 
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 8e131bb..44b947f 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -6,13 +6,13 @@
 set +o xtrace
 
 # Set this to quickly enable tunneling with ML2.
-# Select either 'gre', 'vxlan', or '(gre vxlan)'
-Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-}
+# Select either 'gre', 'vxlan', or 'gre,vxlan'
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"vxlan"}
 # This has to be set here since the agent will write it into the config file
-if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
-    Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE)
+if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "gre" || "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then
+    Q_TUNNEL_TYPES=$Q_ML2_TENANT_NETWORK_TYPE
 elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then
-    Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=gre)
+    Q_TUNNEL_TYPES=gre
 fi
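
With this change, vxlan tunneling is on by default. When the tenant network
type is exactly `gre` or `vxlan`, `Q_TUNNEL_TYPES` carries the choice through
to the agent config; `local` disables tunneling in
`neutron_plugin_configure_service` below. A hedged localrc sketch:

    # local.conf sketch: choose the ML2 tenant network type
    Q_ML2_TENANT_NETWORK_TYPE=gre    # 'vxlan' (new default), 'gre,vxlan', or 'local'
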
 
 # Default openvswitch L2 agent
@@ -50,7 +50,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2
     Q_PLUGIN_CONF_FILENAME=ml2_conf.ini
-    Q_DB_NAME="neutron_ml2"
     Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin"
     # The ML2 plugin delegates L3 routing/NAT functionality to
     # the L3 service plugin which must therefore be specified.
@@ -58,7 +57,7 @@
 }
 
 function neutron_plugin_configure_service {
-    if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
+    if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "local" ]]; then
         Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE)
     elif [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then
         # This assumes you want a simple configuration, and will overwrite
@@ -112,6 +111,12 @@
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vxlan $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS
 
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS
+
+    if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE agent l2_population=True
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE agent tunnel_types=vxlan
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE agent enable_distributed_routing=True
+    fi
 }
 
 function has_neutron_plugin_security_group {
diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec
index d76f7d4..f8d98c3 100644
--- a/lib/neutron_plugins/nec
+++ b/lib/neutron_plugins/nec
@@ -39,7 +39,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec
     Q_PLUGIN_CONF_FILENAME=nec.ini
-    Q_DB_NAME="neutron_nec"
     Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2"
 }
 
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
index 86f09d2..52d85a2 100644
--- a/lib/neutron_plugins/nuage
+++ b/lib/neutron_plugins/nuage
@@ -20,7 +20,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage
     Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini
-    Q_DB_NAME="nuage_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin"
     Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions
     #Nuage specific Neutron defaults. Actual value must be set and sourced
diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent
index 66283ad..a5a58f4 100644
--- a/lib/neutron_plugins/ofagent_agent
+++ b/lib/neutron_plugins/ofagent_agent
@@ -34,10 +34,18 @@
     iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
 }
 
+function _neutron_ofagent_configure_firewall_driver {
+    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+    else
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
+    fi
+}
+
 function neutron_plugin_configure_plugin_agent {
     # Set up integration bridge
     _neutron_ovs_base_setup_bridge $OVS_BRIDGE
-    _neutron_ovs_base_configure_firewall_driver
+    _neutron_ofagent_configure_firewall_driver
 
     # Check a supported openflow version
     OF_VERSION=`ovs-ofctl --version | grep "OpenFlow versions" | awk '{print $3}' | cut -d':' -f2`
@@ -71,6 +79,7 @@
     fi
     AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ofagent-agent"
 
+    iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES
     # Define extra "AGENT" configuration options when q-agt is configured by defining
     # the array ``Q_AGENT_EXTRA_AGENT_OPTS``.
     # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)``
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
index 06f1eee..e5f0d71 100644
--- a/lib/neutron_plugins/oneconvergence
+++ b/lib/neutron_plugins/oneconvergence
@@ -19,7 +19,6 @@
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
     Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
     Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
-    Q_DB_NAME='oc_nvsd_neutron'
 }
 
 # Configure plugin specific information
diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch
index fc81092..c468132 100644
--- a/lib/neutron_plugins/openvswitch
+++ b/lib/neutron_plugins/openvswitch
@@ -10,7 +10,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/openvswitch
     Q_PLUGIN_CONF_FILENAME=ovs_neutron_plugin.ini
-    Q_DB_NAME="ovs_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2"
 }
 
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 5adb0c5..835f645 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -67,7 +67,7 @@
 
     if [ "$VIRT_DRIVER" == 'xenserver' ]; then
         # Make a copy of our config for domU
-        sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domu"
+        sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU"
 
         # Deal with Dom0's L2 Agent:
         Q_RR_DOM0_COMMAND="$NEUTRON_BIN_DIR/neutron-rootwrap-xen-dom0 $Q_RR_CONF_FILE"
@@ -102,6 +102,7 @@
         # Set root wrap
         iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_COMMAND"
     fi
+    iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES
     # Define extra "AGENT" configuration options when q-agt is configured by defining
     # the array ``Q_AGENT_EXTRA_AGENT_OPTS``.
     # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)``
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 26c5489..d913f7c 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -7,6 +7,7 @@
 
 OVS_BRIDGE=${OVS_BRIDGE:-br-int}
 PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-""}
 
 function is_neutron_ovs_base_plugin {
     # Yes, we use OVS.
@@ -17,6 +18,9 @@
     local bridge=$1
     neutron-ovs-cleanup
     sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge
+    if [[ $OVS_DATAPATH_TYPE != "" ]]; then
+        sudo ovs-vsctl set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}
+    fi
     sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
 }
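
`OVS_DATAPATH_TYPE` is passed straight through to
`ovs-vsctl set Bridge ... datapath_type=...`. For example (the datapath names
are standard Open vSwitch values, not defined in this diff):

    # local.conf sketch: run the bridges on the userspace datapath
    OVS_DATAPATH_TYPE=netdev    # kernel datapath is 'system'; empty keeps the OVS default
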
 
@@ -47,6 +51,7 @@
     install_package $(get_packages "openvswitch")
     if is_ubuntu; then
         _neutron_ovs_base_install_ubuntu_dkms
+        restart_service openvswitch-switch
     elif is_fedora; then
         restart_service openvswitch
     elif is_suse; then
@@ -67,7 +72,11 @@
 }
 
 function _neutron_ovs_base_configure_l3_agent {
-    iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+    if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
+        iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge ""
+    else
+        iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+    fi
 
     neutron-ovs-cleanup
     # --no-wait causes a race condition if $PUBLIC_BRIDGE is not up when ip addr flush is called
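
Setting `Q_USE_PROVIDERNET_FOR_PUBLIC=True` clears `external_network_bridge`,
so the L3 agent expects external connectivity from a provider network instead
of plugging directly into $PUBLIC_BRIDGE. A hedged localrc sketch:

    # local.conf sketch: provider network for external traffic
    Q_USE_PROVIDERNET_FOR_PUBLIC=True
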
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index 178bca7..7950ac0 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -17,7 +17,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid
     Q_PLUGIN_CONF_FILENAME=plumgrid.ini
-    Q_DB_NAME="plumgrid_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2"
     PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost}
     PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766}
@@ -46,8 +45,8 @@
 }
 
 function has_neutron_plugin_security_group {
-    # False
-    return 1
+    # return 0 means enabled
+    return 0
 }
 
 function neutron_plugin_check_adv_test_requirements {
diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu
index ceb89fa..f45a797 100644
--- a/lib/neutron_plugins/ryu
+++ b/lib/neutron_plugins/ryu
@@ -25,7 +25,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ryu
     Q_PLUGIN_CONF_FILENAME=ryu.ini
-    Q_DB_NAME="ovs_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2"
 }
 
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index 78e7738..f84b710 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -10,11 +10,8 @@
 LBAAS_PLUGIN=neutron.services.loadbalancer.plugin.LoadBalancerPlugin
 
 function neutron_agent_lbaas_install_agent_packages {
-    if is_ubuntu || is_fedora; then
+    if is_ubuntu || is_fedora || is_suse; then
         install_package haproxy
-    elif is_suse; then
-        ### FIXME: Find out if package can be pushed to Factory
-        echo "HAProxy packages can be installed from server:http project in OBS"
     fi
 }
 
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index d920ba6..2478c47 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -7,7 +7,7 @@
 
 
 AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent"
-VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin"
+VPN_PLUGIN=${VPN_PLUGIN:-"neutron.services.vpn.plugin.VPNDriverPlugin"}
 IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"}
 
 function neutron_vpn_install_agent_packages {
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index c7672db..f4eb82d 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -40,7 +40,6 @@
 function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware
     Q_PLUGIN_CONF_FILENAME=nsx.ini
-    Q_DB_NAME="neutron_nsx"
     Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin"
 }
 
@@ -106,9 +105,6 @@
     if [[ "$NSX_PASSWORD" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD
     fi
-    if [[ "$NSX_REQ_TIMEOUT" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NSX_REQ_TIMEOUT
-    fi
     if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT
     fi
diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md
index 2460e5c..5655e0b 100644
--- a/lib/neutron_thirdparty/README.md
+++ b/lib/neutron_thirdparty/README.md
@@ -28,12 +28,14 @@
   git clone xxx
 
 * ``start_<third_party>``:
-  start running processes, including screen
+  start running processes, under screen if USE_SCREEN=True
   e.g.
-  screen_it XXXX "cd $XXXXY_DIR && $XXXX_DIR/bin/XXXX-bin"
+  run_process XXXX "$XXXX_DIR/bin/XXXX-bin"
 
 * ``stop_<third_party>``:
   stop running processes (non-screen)
+  e.g.
+  stop_process XXXX
 
 * ``check_<third_party>``:
   verify that the integration between neutron server and third-party components is sane
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index c737600..233f3aa 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -64,7 +64,7 @@
 }
 
 function start_ryu {
-    screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
+    run_process ryu "$RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
 }
 
 function stop_ryu {
diff --git a/lib/nova b/lib/nova
index ebdb6b4..c24bc2f 100644
--- a/lib/nova
+++ b/lib/nova
@@ -39,15 +39,25 @@
 NOVA_CONF_DIR=/etc/nova
 NOVA_CONF=$NOVA_CONF_DIR/nova.conf
 NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
+NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
 NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
 
 NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
 
+if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
+    NOVA_SERVICE_PROTOCOL="https"
+    EC2_SERVICE_PROTOCOL="https"
+else
+    EC2_SERVICE_PROTOCOL="http"
+fi
+
 # Public facing bits
 NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST}
 NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
 NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
 NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773}
+EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773}
 
 # Support entry points installation of console scripts
 if [[ -d $NOVA_DIR/bin ]]; then
@@ -59,10 +69,6 @@
 # Set the paths of certain binaries
 NOVA_ROOTWRAP=$(get_rootwrap_location nova)
 
-# Allow rate limiting to be turned off for testing, like for Tempest
-# NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting
-API_RATE_LIMIT=${API_RATE_LIMIT:-"True"}
-
 # Option to enable/disable config drive
 # NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive
 FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"always"}
@@ -88,6 +94,10 @@
     | grep ^[ep] \
     | head -1)
 
+# $NOVA_VNC_ENABLED can be used to forcibly enable vnc configuration.
+# In multi-node setups this allows compute hosts to not run n-novnc.
+NOVA_VNC_ENABLED=$(trueorfalse False $NOVA_VNC_ENABLED)
+
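
A hedged localrc sketch for a compute node that needs the VNC options written
but should not run the console proxies itself:

    # compute-node local.conf sketch
    NOVA_VNC_ENABLED=True    # forces vnc config even though n-novnc/n-xvnc are not enabled
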
 # Get hypervisor configuration
 # ----------------------------
 
@@ -173,14 +183,15 @@
         clean_iptables
 
         # Destroy old instances
-        instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
+        local instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
         if [ ! "$instances" = "" ]; then
             echo $instances | xargs -n1 sudo virsh destroy || true
             echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
         fi
 
         # Logout and delete iscsi sessions
-        tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+        local tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+        local target
         for target in $tgts; do
             sudo iscsiadm --mode node -T $target --logout || true
         done
@@ -218,14 +229,14 @@
     sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
     sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
     # Specify rootwrap.conf as first parameter to nova-rootwrap
-    ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"
+    local rootwrap_sudoer_cmd="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *"
 
     # Set up the rootwrap sudoers for nova
-    TEMPFILE=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
-    chmod 0440 $TEMPFILE
-    sudo chown root:root $TEMPFILE
-    sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
+    local tempfile=`mktemp`
+    echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd" >$tempfile
+    chmod 0440 $tempfile
+    sudo chown root:root $tempfile
+    sudo mv $tempfile /etc/sudoers.d/nova-rootwrap
 }
 
 # configure_nova() - Set config files, create data dirs, etc
@@ -274,7 +285,7 @@
             if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
                 if is_ubuntu; then
                     if [[ ! "$DISTRO" > natty ]]; then
-                        cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
+                        local cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
                         sudo mkdir -p /cgroup
                         if ! grep -q cgroup /etc/fstab; then
                             echo "$cgline" | sudo tee -a /etc/fstab
@@ -328,33 +339,25 @@
 # Migrated from keystone_data.sh
 create_nova_accounts() {
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Nova
     if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
 
-        NOVA_USER=$(get_or_create_user "nova" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT "nova@example.com")
-        get_or_add_user_role $ADMIN_ROLE $NOVA_USER $SERVICE_TENANT
+        local nova_user=$(get_or_create_user "nova" \
+            "$SERVICE_PASSWORD" $service_tenant)
+        get_or_add_user_role $admin_role $nova_user $service_tenant
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            NOVA_SERVICE=$(get_or_create_service "nova" \
+            local nova_service=$(get_or_create_service "nova" \
                 "compute" "Nova Compute Service")
-            get_or_create_endpoint $NOVA_SERVICE \
+            get_or_create_endpoint $nova_service \
                 "$REGION_NAME" \
                 "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                 "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                 "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
-
-            NOVA_V3_SERVICE=$(get_or_create_service "novav3" \
-                "computev3" "Nova Compute Service V3")
-            get_or_create_endpoint $NOVA_V3_SERVICE \
-                "$REGION_NAME" \
-                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
-                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
-                "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3"
         fi
     fi
 
@@ -369,13 +372,13 @@
         # EC2
         if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
 
-            EC2_SERVICE=$(get_or_create_service "ec2" \
+            local ec2_service=$(get_or_create_service "ec2" \
                 "ec2" "EC2 Compatibility Layer")
-            get_or_create_endpoint $EC2_SERVICE \
+            get_or_create_endpoint $ec2_service \
                 "$REGION_NAME" \
-                "http://$SERVICE_HOST:8773/services/Cloud" \
-                "http://$SERVICE_HOST:8773/services/Admin" \
-                "http://$SERVICE_HOST:8773/services/Cloud"
+                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/services/Cloud" \
+                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/services/Admin" \
+                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/services/Cloud"
         fi
     fi
 
@@ -383,8 +386,8 @@
     if is_service_enabled n-obj swift3; then
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            S3_SERVICE=$(get_or_create_service "s3" "s3" "S3")
-            get_or_create_endpoint $S3_SERVICE \
+            local s3_service=$(get_or_create_service "s3" "s3" "S3")
+            get_or_create_endpoint $s3_service \
                 "$REGION_NAME" \
                 "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
                 "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
@@ -404,12 +407,12 @@
     iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
     iniset $NOVA_CONF DEFAULT auth_strategy "keystone"
     iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
+    iniset $NOVA_CONF DEFAULT allow_migrate_to_same_host "True"
     iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
     iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
     iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER"
     iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
     iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
-    iniset $NOVA_CONF DEFAULT fixed_range ""
     iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
     iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
     iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
@@ -435,16 +438,17 @@
             iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
         fi
 
-        # Add keystone authtoken configuration
-
-        iniset $NOVA_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-        iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-        iniset $NOVA_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
-        iniset $NOVA_CONF keystone_authtoken admin_user nova
-        iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+        configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR
     fi
 
-    iniset $NOVA_CONF keystone_authtoken signing_dir $NOVA_AUTH_CACHE_DIR
+    if is_service_enabled cinder; then
+        if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
+            CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
+            CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
+            iniset $NOVA_CONF cinder endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
+            iniset $NOVA_CONF cinder ca_certificates_file $SSL_BUNDLE_FILE
+        fi
+    fi
 
     if [ -n "$NOVA_STATE_PATH" ]; then
         iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH"
@@ -460,9 +464,6 @@
     if [ "$SYSLOG" != "False" ]; then
         iniset $NOVA_CONF DEFAULT use_syslog "True"
     fi
-    if [ "$API_RATE_LIMIT" != "True" ]; then
-        iniset $NOVA_CONF DEFAULT api_rate_limit "False"
-    fi
     if [ "$FORCE_CONFIG_DRIVE" != "False" ]; then
         iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE"
     fi
@@ -480,18 +481,6 @@
         iniset $NOVA_CONF DEFAULT notification_driver "messaging"
     fi
 
-    # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
-    if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
-        EXTRA_OPTS=$EXTRA_FLAGS
-    fi
-
-    # Define extra nova conf flags by defining the array ``EXTRA_OPTS``.
-    # For Example: ``EXTRA_OPTS=(foo=true bar=2)``
-    for I in "${EXTRA_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        iniset $NOVA_CONF DEFAULT ${I/=/ }
-    done
-
     # All nova-compute workers need to know the vnc configuration options
     # These settings don't hurt anything if n-xvnc and n-novnc are disabled
     if is_service_enabled n-cpu; then
@@ -503,7 +492,7 @@
         iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
     fi
 
-    if is_service_enabled n-novnc || is_service_enabled n-xvnc; then
+    if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
         # Address on which instance vncservers will listen on compute hosts.
         # For multi-host, this should be the management ip of the compute host.
         VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
@@ -528,8 +517,31 @@
     fi
 
     iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
+    iniset $NOVA_CONF DEFAULT keystone_ec2_url $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
     iniset_rpc_backend nova $NOVA_CONF DEFAULT
-    iniset $NOVA_CONF glance api_servers "$GLANCE_HOSTPORT"
+    iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}"
+
+    iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
+    iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS"
+    iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"
+
+    if is_ssl_enabled_service glance || is_service_enabled tls-proxy; then
+        iniset $NOVA_CONF DEFAULT glance_protocol https
+    fi
+
+    # Register SSL certificates if provided
+    if is_ssl_enabled_service nova; then
+        ensure_certificates NOVA
+
+        iniset $NOVA_CONF DEFAULT ssl_cert_file "$NOVA_SSL_CERT"
+        iniset $NOVA_CONF DEFAULT ssl_key_file "$NOVA_SSL_KEY"
+
+        iniset $NOVA_CONF DEFAULT enabled_ssl_apis "$NOVA_ENABLED_APIS"
+    fi
+
+    if is_service_enabled tls-proxy; then
+        iniset $NOVA_CONF DEFAULT ec2_listen_port $EC2_SERVICE_PORT_INT
+    fi
 }
 
 function init_nova_cells {
@@ -658,19 +670,22 @@
 function start_nova_api {
     # Get right service port for testing
     local service_port=$NOVA_SERVICE_PORT
+    local service_protocol=$NOVA_SERVICE_PROTOCOL
     if is_service_enabled tls-proxy; then
         service_port=$NOVA_SERVICE_PORT_INT
+        service_protocol="http"
     fi
 
-    screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
+    run_process n-api "$NOVA_BIN_DIR/nova-api"
     echo "Waiting for nova-api to start..."
-    if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
+    if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then
         die $LINENO "nova-api did not start"
     fi
 
     # Start proxies if enabled
     if is_service_enabled tls-proxy; then
         start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
+        start_tls_proxy '*' $EC2_SERVICE_PORT $NOVA_SERVICE_HOST $EC2_SERVICE_PORT_INT &
     fi
 }
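
The EC2 endpoint now follows the same TLS pattern as the compute API: nova-api
binds its EC2 listener to the internal port and tls-proxy terminates TLS on
the public one. The assumed request flow:

    # with tls-proxy enabled:
    #   client --https--> $SERVICE_HOST:8773  (tls-proxy)
    #          --http---> $NOVA_SERVICE_HOST:18773  (nova-api EC2 listener)
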
 
@@ -684,17 +699,24 @@
 
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
-        # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
-        screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
+        # 'sg' will be used in run_process to execute nova-compute as a member of the
+        # **$LIBVIRT_GROUP** group.
+        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP
     elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
+        local i
         for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
-            screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file <(echo -e '[DEFAULT]\nhost=${HOSTNAME}${i}')"
+            # Instead of relying on process redirection to inject fake host
+            # configurations, write a real configuration file per fake
+            # compute. Each fake gets its own configuration and log file.
+            local fake_conf="${NOVA_FAKE_CONF}-${i}"
+            iniset $fake_conf DEFAULT host "${HOSTNAME}${i}"
+            run_process "n-cpu-${i}" "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf --config-file $fake_conf"
         done
     else
         if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
             start_nova_hypervisor
         fi
-        screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
+        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
     fi
 }
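
A hedged localrc sketch for exercising the per-fake configuration files (each
fake compute now gets its own `run_process` entry and its own log):

    # local.conf sketch: several fake computes
    VIRT_DRIVER=fake
    NUMBER_FAKE_NOVA_COMPUTE=3    # spawns n-cpu-1 .. n-cpu-3
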
 
@@ -707,25 +729,25 @@
         local compute_cell_conf=$NOVA_CONF
     fi
 
-    # ``screen_it`` checks ``is_service_enabled``, it is not needed here
-    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
-    screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
-    screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
+    # ``run_process`` checks ``is_service_enabled``, it is not needed here
+    run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
+    run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
+    run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
 
-    screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
-    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
-    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
-    screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
+    run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
+    run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
+    run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
+    run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
 
-    screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
-    screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
-    screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
-    screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
+    run_process n-novnc "$NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
+    run_process n-xvnc "$NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
+    run_process n-spice "$NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
+    run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
 
     # Start the nova-objectstore only if the swift3 service is not enabled,
     # since Swift will act as the s3 objectstore.
     is_service_enabled swift3 || \
-        screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"
+        run_process n-obj "$NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"
 }
 
 function start_nova {
@@ -734,7 +756,7 @@
 }
 
 function stop_nova_compute {
-    screen_stop n-cpu
+    stop_process n-cpu
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         stop_nova_hypervisor
     fi
@@ -745,7 +767,7 @@
     # Some services are listed here twice since more than one instance
     # of a service may be running in certain configs.
     for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
-        screen_stop $serv
+        stop_process $serv
     done
 }
 
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 6fb5c38..6b9db48 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -37,7 +37,7 @@
     # and HP images used in the gate; rackspace has firewalld but hp
     # cloud doesn't.  RHEL6 doesn't have firewalld either.  So we
     # don't care if it fails.
-    if is_fedora; then
+    if is_fedora && is_package_installed firewalld; then
         sudo service firewalld restart || true
     fi
 }
@@ -57,7 +57,9 @@
 EOF
     fi
 
-    if is_ubuntu; then
+    # Since the release of Debian Wheezy the libvirt init script is libvirtd
+# and not libvirt-bin anymore.
+    if is_ubuntu && [ ! -f /etc/init.d/libvirtd ]; then
         LIBVIRT_DAEMON=libvirt-bin
     else
         LIBVIRT_DAEMON=libvirtd
diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal
index 1d4d414..22d16a6 100644
--- a/lib/nova_plugins/hypervisor-baremetal
+++ b/lib/nova_plugins/hypervisor-baremetal
@@ -58,12 +58,6 @@
         sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF"
         iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF"
     fi
-
-    # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``.
-    for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
-        # Attempt to convert flags to options
-        iniset $NOVA_CONF baremetal ${I/=/ }
-    done
 }
 
 # install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake
index e7a833f..dc93633 100644
--- a/lib/nova_plugins/hypervisor-fake
+++ b/lib/nova_plugins/hypervisor-fake
@@ -47,7 +47,7 @@
     iniset $NOVA_CONF DEFAULT quota_security_groups -1
     iniset $NOVA_CONF DEFAULT quota_security_group_rules -1
     iniset $NOVA_CONF DEFAULT quota_key_pairs -1
-    iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter"
+    iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,CoreFilter,RamFilter,DiskFilter"
 }
 
 # install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index c068c74..4004cc9 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -37,12 +37,9 @@
     configure_libvirt
     LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
 
-    # NOTE(adam_g): The ironic compute driver currently lives in the ironic
-    # tree.  We purposely configure Nova to load it from there until it moves
-    # back into Nova proper.
-    iniset $NOVA_CONF DEFAULT compute_driver ironic.nova.virt.ironic.IronicDriver
+    iniset $NOVA_CONF DEFAULT compute_driver nova.virt.ironic.IronicDriver
     iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
-    iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic.nova.scheduler.ironic_host_manager.IronicHostManager
+    iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.ironic_host_manager.IronicHostManager
     iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
     iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
     # ironic section
@@ -51,11 +48,13 @@
     iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_URI/v2.0
     iniset $NOVA_CONF ironic admin_tenant_name demo
     iniset $NOVA_CONF ironic api_endpoint http://$SERVICE_HOST:6385/v1
-    iniset $NOVA_CONF ironic sql_connection `database_connection_url nova_bm`
 }
 
 # install_nova_hypervisor() - Install external components
 function install_nova_hypervisor {
+    if ! is_service_enabled neutron; then
+        die $LINENO "Neutron should be enabled for usage of the Ironic Nova driver."
+    fi
     install_libvirt
 }
 
diff --git a/lib/opendaylight b/lib/opendaylight
index 33b3f0a..1541ac1 100644
--- a/lib/opendaylight
+++ b/lib/opendaylight
@@ -139,6 +139,8 @@
     # The flags to ODL have the following meaning:
     #   -of13: runs ODL using OpenFlow 1.3 protocol support.
     #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
+    # NOTE(chdent): Leaving this as screen_it instead of run_process until
+    # the right thing for this service is determined.
     screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
 
     # Sleep a bit to let OpenDaylight finish starting up
@@ -147,7 +149,7 @@
 
 # stop_opendaylight() - Stop running processes (non-screen)
 function stop_opendaylight {
-    screen_stop odl-server
+    stop_process odl-server
 }
 
 # stop_opendaylight-compute() - Remove OVS bridges
diff --git a/lib/oslo b/lib/oslo
index 421fbce..e5fa37e 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -22,10 +22,15 @@
 # --------
 CLIFF_DIR=$DEST/cliff
 OSLOCFG_DIR=$DEST/oslo.config
+OSLOCON_DIR=$DEST/oslo.concurrency
 OSLODB_DIR=$DEST/oslo.db
 OSLOI18N_DIR=$DEST/oslo.i18n
+OSLOLOG_DIR=$DEST/oslo.log
+OSLOMID_DIR=$DEST/oslo.middleware
 OSLOMSG_DIR=$DEST/oslo.messaging
 OSLORWRAP_DIR=$DEST/oslo.rootwrap
+OSLOSERIALIZATION_DIR=$DEST/oslo.serialization
+OSLOUTILS_DIR=$DEST/oslo.utils
 OSLOVMWARE_DIR=$DEST/oslo.vmware
 PYCADF_DIR=$DEST/pycadf
 STEVEDORE_DIR=$DEST/stevedore
@@ -39,19 +44,30 @@
 
 # install_oslo() - Collect source and prepare
 function install_oslo {
-    # TODO(sdague): remove this once we get to Icehouse, this just makes
-    # for a smoother transition of existing users.
-    cleanup_oslo
-
     git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH
     setup_install $CLIFF_DIR
 
     git_clone $OSLOI18N_REPO $OSLOI18N_DIR $OSLOI18N_BRANCH
     setup_install $OSLOI18N_DIR
 
+    git_clone $OSLOUTILS_REPO $OSLOUTILS_DIR $OSLOUTILS_BRANCH
+    setup_install $OSLOUTILS_DIR
+
+    git_clone $OSLOSERIALIZATION_REPO $OSLOSERIALIZATION_DIR $OSLOSERIALIZATION_BRANCH
+    setup_install $OSLOSERIALIZATION_DIR
+
     git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH
     setup_install $OSLOCFG_DIR
 
+    git_clone $OSLOCON_REPO $OSLOCON_DIR $OSLOCON_BRANCH
+    setup_install $OSLOCON_DIR
+
+    git_clone $OSLOLOG_REPO $OSLOLOG_DIR $OSLOLOG_BRANCH
+    setup_install $OSLOLOG_DIR
+
+    git_clone $OSLOMID_REPO $OSLOMID_DIR $OSLOMID_BRANCH
+    setup_install $OSLOMID_DIR
+
     git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH
     setup_install $OSLOMSG_DIR
 
@@ -74,17 +90,6 @@
     setup_install $TASKFLOW_DIR
 }
 
-# cleanup_oslo() - purge possibly old versions of oslo
-function cleanup_oslo {
-    # this means we've got an old oslo installed, lets get rid of it
-    if ! python -c 'import oslo.config' 2>/dev/null; then
-        echo "Found old oslo.config... removing to ensure consistency"
-        local PIP_CMD=$(get_pip_command)
-        pip_install oslo.config
-        sudo $PIP_CMD uninstall -y oslo.config
-    fi
-}
-
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/rpc_backend b/lib/rpc_backend
index e922daa..de82fe1 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -6,6 +6,7 @@
 #
 # - ``functions`` file
 # - ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used
+# - ``RPC_MESSAGING_PROTOCOL`` option for configuring the messaging protocol
 
 # ``stack.sh`` calls the entry points in this order:
 #
@@ -26,6 +27,8 @@
 # Make sure we only have one rpc backend enabled.
 # Also check the specified rpc backend is available on your platform.
 function check_rpc_backend {
+    local c svc
+
     local rpc_needed=1
     # We rely on the fact that filenames in lib/* match the service names
     # that can be passed as arguments to is_service_enabled.
@@ -41,7 +44,7 @@
     local rpc_backend_cnt=0
     for svc in qpid zeromq rabbit; do
         is_service_enabled $svc &&
-        ((rpc_backend_cnt++))
+        (( rpc_backend_cnt++ )) || true
     done
     if [ "$rpc_backend_cnt" -gt 1 ]; then
         echo "ERROR: only one rpc backend may be enabled,"
@@ -88,25 +91,56 @@
             exit_distro_not_supported "zeromq installation"
         fi
     fi
+
+    # Remove the AMQP 1.0 messaging libraries
+    if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
+        if is_fedora; then
+            uninstall_package qpid-proton-c-devel
+            uninstall_package python-qpid-proton
+        fi
+        # TODO(kgiusti) ubuntu cleanup
+    fi
 }
 
 # install rpc backend
 function install_rpc_backend {
+    # Regardless of the broker used, if AMQP 1.0 is configured, load
+    # the messaging client libraries that oslo.messaging needs.
+    if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
+        if is_fedora; then
+            install_package qpid-proton-c-devel
+            install_package python-qpid-proton
+        elif is_ubuntu; then
+            # TODO(kgiusti) The QPID AMQP 1.0 protocol libraries
+            # are not yet in the ubuntu repos. Enable these installs
+            # once they are present:
+            #install_package libqpid-proton2-dev
+            #install_package python-qpid-proton
+            # Also add 'uninstall' directives in cleanup_rpc_backend()!
+            exit_distro_not_supported "QPID AMQP 1.0 Proton libraries"
+        else
+            exit_distro_not_supported "QPID AMQP 1.0 Proton libraries"
+        fi
+        # Install pyngus client API
+        # TODO(kgiusti) can remove once python qpid bindings are
+        # available on all supported platforms _and_ pyngus is added
+        # to the requirements.txt file in oslo.messaging
+        pip_install pyngus
+    fi
+
     if is_service_enabled rabbit; then
         # Install rabbitmq-server
-        # the temp file is necessary due to LP: #878600
-        tfile=$(mktemp)
-        install_package rabbitmq-server > "$tfile" 2>&1
-        cat "$tfile"
-        rm -f "$tfile"
+        install_package rabbitmq-server
     elif is_service_enabled qpid; then
+        local qpid_conf_file=/etc/qpid/qpidd.conf
         if is_fedora; then
             install_package qpid-cpp-server
             if [[ $DISTRO =~ (rhel6) ]]; then
+                qpid_conf_file=/etc/qpidd.conf
                 # RHEL6 leaves "auth=yes" in /etc/qpidd.conf, it needs to
                 # be no or you get GSS authentication errors as it
                 # attempts to default to this.
-                sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf
+                sudo sed -i.bak 's/^auth=yes$/auth=no/' $qpid_conf_file
             fi
         elif is_ubuntu; then
             install_package qpidd
@@ -115,6 +149,22 @@
         else
             exit_distro_not_supported "qpid installation"
         fi
+        # If AMQP 1.0 is specified, ensure that the version of the
+        # broker can support AMQP 1.0 and configure the queue and
+        # topic address patterns used by oslo.messaging.
+        if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
+            QPIDD=$(type -p qpidd)
+            if ! $QPIDD --help | grep -q "queue-patterns"; then
+                exit_distro_not_supported "qpidd with AMQP 1.0 support"
+            fi
+            if ! grep -q "queue-patterns=exclusive" $qpid_conf_file; then
+                cat <<EOF | sudo tee --append $qpid_conf_file
+queue-patterns=exclusive
+queue-patterns=unicast
+topic-patterns=broadcast
+EOF
+            fi
+        fi
     elif is_service_enabled zeromq; then
         # NOTE(ewindisch): Redis is not strictly necessary
         # but there is a matchmaker driver that works
@@ -132,6 +182,11 @@
         sudo mkdir -p /var/run/openstack
         sudo chown $STACK_USER /var/run/openstack
     fi
+
+    # If using the QPID broker, install the QPID python client API
+    if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
+        install_package python-qpid
+    fi
 }
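
A hedged localrc sketch for exercising the AMQP 1.0 path with the Qpid broker
(Fedora only for now, per the install logic above):

    # local.conf sketch: Qpid speaking AMQP 1.0
    enable_service qpid
    RPC_MESSAGING_PROTOCOL=AMQP1
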
 
 # restart the rpc backend
@@ -142,6 +197,7 @@
         # NOTE(bnemec): Retry initial rabbitmq configuration to deal with
         # the fact that sometimes it fails to start properly.
         # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1059028
+        local i
         for i in `seq 10`; do
             if is_fedora || is_suse; then
                 # service is not started by default
@@ -177,7 +233,12 @@
         MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
         iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
     elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
-        iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid
+        # For Qpid use the 'amqp' oslo.messaging transport when AMQP 1.0 is used
+        if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then
+            iniset $file $section rpc_backend "amqp"
+        else
+            iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid
+        fi
         iniset $file $section qpid_hostname ${QPID_HOST:-$SERVICE_HOST}
         if is_ubuntu; then
             QPID_PASSWORD=`sudo strings /etc/qpid/qpidd.sasldb | grep -B1 admin | head -1`
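With the hunk above, opting into AMQP 1.0 comes down to a few localrc settings; a hypothetical snippet (the exact service toggles depend on the rest of your configuration):

    # Switch the RPC backend from rabbit to qpid speaking AMQP 1.0
    disable_service rabbit
    enable_service qpid
    RPC_MESSAGING_PROTOCOL=AMQP1

With that set, iniset_rpc_backend writes rpc_backend = "amqp" instead of the per-project impl_qpid path.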
diff --git a/lib/sahara b/lib/sahara
index 0cc2fe9..5c7c253 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -7,6 +7,7 @@
 # ``stack.sh`` calls the entry points in this order:
 #
 # install_sahara
+# install_python_saharaclient
 # configure_sahara
 # start_sahara
 # stop_sahara
@@ -24,8 +25,13 @@
 SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git}
 SAHARA_BRANCH=${SAHARA_BRANCH:-master}
 
+SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git}
+SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master}
+
 # Set up default directories
 SAHARA_DIR=$DEST/sahara
+SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient
+
 SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
 SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
 
@@ -57,18 +63,18 @@
 # service     sahara    admin
 function create_sahara_accounts {
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    SAHARA_USER=$(get_or_create_user "sahara" \
-        "$SERVICE_PASSWORD" $SERVICE_TENANT "sahara@example.com")
-    get_or_add_user_role $ADMIN_ROLE $SAHARA_USER $SERVICE_TENANT
+    local sahara_user=$(get_or_create_user "sahara" \
+        "$SERVICE_PASSWORD" $service_tenant)
+    get_or_add_user_role $admin_role $sahara_user $service_tenant
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        SAHARA_SERVICE=$(get_or_create_service "sahara" \
+        local sahara_service=$(get_or_create_service "sahara" \
             "data_processing" "Sahara Data Processing")
-        get_or_create_endpoint $SAHARA_SERVICE \
+        get_or_create_endpoint $sahara_service \
             "$REGION_NAME" \
             "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
             "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
@@ -100,16 +106,15 @@
     sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR
     rm -rf $SAHARA_AUTH_CACHE_DIR/*
 
-    # Set actual keystone auth configs
-    iniset $SAHARA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
-    iniset $SAHARA_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $SAHARA_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $SAHARA_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $SAHARA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $SAHARA_CONF_FILE keystone_authtoken admin_user sahara
-    iniset $SAHARA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR
-    iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
+    configure_auth_token_middleware $SAHARA_CONF_FILE sahara $SAHARA_AUTH_CACHE_DIR
+
+    # Set configuration to send notifications
+
+    if is_service_enabled ceilometer; then
+        iniset $SAHARA_CONF_FILE DEFAULT enable_notifications "true"
+        iniset $SAHARA_CONF_FILE DEFAULT notification_driver "messaging"
+        iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT
+    fi
 
     iniset $SAHARA_CONF_FILE DEFAULT verbose True
     iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
@@ -146,9 +151,15 @@
     setup_develop $SAHARA_DIR
 }
 
+# install_python_saharaclient() - Collect source and prepare
+function install_python_saharaclient {
+    git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH
+    setup_develop $SAHARA_PYTHONCLIENT_DIR
+}
+
 # start_sahara() - Start running processes, including screen
 function start_sahara {
-    screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE"
+    run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE"
 }
 
 # stop_sahara() - Stop running processes
diff --git a/lib/sahara-dashboard b/lib/sahara-dashboard
deleted file mode 100644
index a81df0f..0000000
--- a/lib/sahara-dashboard
+++ /dev/null
@@ -1,72 +0,0 @@
-# lib/sahara-dashboard
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# - ``SERVICE_HOST``
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_sahara_dashboard
-# - configure_sahara_dashboard
-# - cleanup_sahara_dashboard
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/horizon
-
-# Defaults
-# --------
-
-# Set up default repos
-SAHARA_DASHBOARD_REPO=${SAHARA_DASHBOARD_REPO:-${GIT_BASE}/openstack/sahara-dashboard.git}
-SAHARA_DASHBOARD_BRANCH=${SAHARA_DASHBOARD_BRANCH:-master}
-
-SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git}
-SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master}
-
-# Set up default directories
-SAHARA_DASHBOARD_DIR=$DEST/sahara-dashboard
-SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient
-
-# Functions
-# ---------
-
-function configure_sahara_dashboard {
-
-    echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    echo -e "HORIZON_CONFIG['dashboards'] += ('sahara',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
-    echo -e "INSTALLED_APPS += ('saharadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
-
-    if is_service_enabled neutron; then
-        echo -e "SAHARA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    fi
-}
-
-# install_sahara_dashboard() - Collect source and prepare
-function install_sahara_dashboard {
-    install_python_saharaclient
-    git_clone $SAHARA_DASHBOARD_REPO $SAHARA_DASHBOARD_DIR $SAHARA_DASHBOARD_BRANCH
-    setup_develop $SAHARA_DASHBOARD_DIR
-}
-
-function install_python_saharaclient {
-    git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH
-    setup_develop $SAHARA_PYTHONCLIENT_DIR
-}
-
-# Cleanup file settings.py from Sahara
-function cleanup_sahara_dashboard {
-    sed -i '/sahara/d' $HORIZON_DIR/openstack_dashboard/settings.py
-}
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
-
diff --git a/lib/stackforge b/lib/stackforge
index e6528af..2d80dad 100644
--- a/lib/stackforge
+++ b/lib/stackforge
@@ -29,35 +29,21 @@
 # --------
 WSME_DIR=$DEST/wsme
 PECAN_DIR=$DEST/pecan
+SQLALCHEMY_MIGRATE_DIR=$DEST/sqlalchemy-migrate
 
 # Entry Points
 # ------------
 
 # install_stackforge() - Collect source and prepare
 function install_stackforge {
-    # TODO(sdague): remove this once we get to Icehouse, this just makes
-    # for a smoother transition of existing users.
-    cleanup_stackforge
-
     git_clone $WSME_REPO $WSME_DIR $WSME_BRANCH
     setup_package $WSME_DIR
 
     git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH
     setup_package $PECAN_DIR
-}
 
-# cleanup_stackforge() - purge possibly old versions of stackforge libraries
-function cleanup_stackforge {
-    # this means we've got an old version installed, lets get rid of it
-    # otherwise python hates itself
-    for lib in wsme pecan; do
-        if ! python -c "import $lib" 2>/dev/null; then
-            echo "Found old $lib... removing to ensure consistency"
-            local PIP_CMD=$(get_pip_command)
-            pip_install $lib
-            sudo $PIP_CMD uninstall -y $lib
-        fi
-    done
+    git_clone $SQLALCHEMY_MIGRATE_REPO $SQLALCHEMY_MIGRATE_DIR $SQLALCHEMY_MIGRATE_BRANCH
+    setup_package $SQLALCHEMY_MIGRATE_DIR
 }
 
 # Restore xtrace
diff --git a/lib/swift b/lib/swift
index 2b161c3..8139552 100644
--- a/lib/swift
+++ b/lib/swift
@@ -29,6 +29,10 @@
 # Defaults
 # --------
 
+if is_ssl_enabled_service "s-proxy" || is_service_enabled tls-proxy; then
+    SWIFT_SERVICE_PROTOCOL="https"
+fi
+
 # Set up default directories
 SWIFT_DIR=$DEST/swift
 SWIFTCLIENT_DIR=$DEST/python-swiftclient
@@ -36,6 +40,9 @@
 SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift}
 SWIFT3_DIR=$DEST/swift3
 
+SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081}
+
 # TODO: add logging to different location.
 
 # Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
@@ -115,6 +122,10 @@
 CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011}
 ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
 
+# Enable tempurl feature
+SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False}
+SWIFT_TEMPURL_KEY=${SWIFT_TEMPURL_KEY}
+
 # Tell Tempest this project is present
 TEMPEST_SERVICES+=,swift
 
@@ -150,9 +161,10 @@
 function _cleanup_swift_apache_wsgi {
     sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi
     disable_apache_site proxy-server
+    local node_number type
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
         for type in object container account; do
-            site_name=${type}-server-${node_number}
+            local site_name=${type}-server-${node_number}
             disable_apache_site ${site_name}
             sudo rm -f $(apache_site_config_for ${site_name})
         done
@@ -182,10 +194,11 @@
     " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi
 
     # copy apache vhost file and set name and port
+    local node_number
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
-        object_port=$[OBJECT_PORT_BASE + 10 * ($node_number - 1)]
-        container_port=$[CONTAINER_PORT_BASE + 10 * ($node_number - 1)]
-        account_port=$[ACCOUNT_PORT_BASE + 10 * ($node_number - 1)]
+        local object_port=$[OBJECT_PORT_BASE + 10 * ($node_number - 1)]
+        local container_port=$[CONTAINER_PORT_BASE + 10 * ($node_number - 1)]
+        local account_port=$[ACCOUNT_PORT_BASE + 10 * ($node_number - 1)]
 
         sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number})
         sudo sed -e "
@@ -245,7 +258,7 @@
     local server_type=$4
 
     log_facility=$[ node_id - 1 ]
-    node_path=${SWIFT_DATA_DIR}/${node_number}
+    local node_path=${SWIFT_DATA_DIR}/${node_number}
 
     iniuncomment ${swift_node_config} DEFAULT user
     iniset ${swift_node_config} DEFAULT user ${STACK_USER}
@@ -263,7 +276,7 @@
     iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
 
     iniuncomment ${swift_node_config} DEFAULT workers
-    iniset ${swift_node_config} DEFAULT workers 1
+    iniset ${swift_node_config} DEFAULT workers ${API_WORKERS:-1}
 
     iniuncomment ${swift_node_config} DEFAULT disable_fallocate
     iniset ${swift_node_config} DEFAULT disable_fallocate true
@@ -328,7 +341,18 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG
 
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+    if is_service_enabled tls-proxy; then
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT}
+    else
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+    fi
+
+    if is_ssl_enabled_service s-proxy; then
+        ensure_certificates SWIFT
+
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT cert_file "$SWIFT_SSL_CERT"
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT key_file "$SWIFT_SSL_KEY"
+    fi
 
     # Devstack is commonly run in a small slow environment, so bump the
     # timeouts up.
@@ -376,15 +400,7 @@
 
     # Configure Keystone
     sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER}
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cafile $KEYSTONE_SSL_CA
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR
+    configure_auth_token_middleware ${SWIFT_CONFIG_PROXY_SERVER} swift $SWIFT_AUTH_CACHE_DIR filter:authtoken
     # This causes the authtoken middleware to use the same python logging
     # adapter provided by the swift proxy-server, so that request transaction
     # IDs will included in all of its log messages.
@@ -403,7 +419,7 @@
 auth_port = ${KEYSTONE_AUTH_PORT}
 auth_host = ${KEYSTONE_AUTH_HOST}
 auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
-cafile = ${KEYSTONE_SSL_CA}
+cafile = ${SSL_BUNDLE_FILE}
 auth_token = ${SERVICE_TOKEN}
 admin_token = ${SERVICE_TOKEN}
 
@@ -416,10 +432,11 @@
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE}
 
+    local node_number
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
-        swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
+        local swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
-        generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] object
+        generate_swift_config ${swift_node_config} ${node_number} $(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) object
         iniset ${swift_node_config} filter:recon recon_cache_path  ${SWIFT_DATA_DIR}/cache
         # Using a sed and not iniset/iniuncomment because we want to a global
         # modification and make sure it works for new sections.
@@ -427,14 +444,14 @@
 
         swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
-        generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] container
+        generate_swift_config ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container
         iniuncomment ${swift_node_config} app:container-server allow_versions
         iniset ${swift_node_config} app:container-server allow_versions  "true"
         sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
 
         swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
-        generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] account
+        generate_swift_config ${swift_node_config} ${node_number} $(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) account
         sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
     done
 
@@ -460,7 +477,7 @@
         iniset ${testfile} func_test auth_prefix /v2.0/
     fi
 
-    swift_log_dir=${SWIFT_DATA_DIR}/logs
+    local swift_log_dir=${SWIFT_DATA_DIR}/logs
     rm -rf ${swift_log_dir}
     mkdir -p ${swift_log_dir}/hourly
     sudo chown -R ${STACK_USER}:adm ${swift_log_dir}
@@ -484,9 +501,9 @@
     # First do a bit of setup by creating the directories and
     # changing the permissions so we can run it as our user.
 
-    USER_GROUP=$(id -g ${STACK_USER})
+    local user_group=$(id -g ${STACK_USER})
     sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
-    sudo chown -R ${STACK_USER}:${USER_GROUP} ${SWIFT_DATA_DIR}
+    sudo chown -R ${STACK_USER}:${user_group} ${SWIFT_DATA_DIR}
 
     # Create a loopback disk and format it to XFS.
     if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
@@ -514,15 +531,16 @@
 
     # Create a link to the above mount and
     # create all of the directories needed to emulate a few different servers
+    local node_number
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
         sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
-        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
-        node=${SWIFT_DATA_DIR}/${node_number}/node
-        node_device=${node}/sdb1
+        local drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
+        local node=${SWIFT_DATA_DIR}/${node_number}/node
+        local node_device=${node}/sdb1
         [[ -d $node ]] && continue
         [[ -d $drive ]] && continue
-        sudo install -o ${STACK_USER} -g $USER_GROUP -d $drive
-        sudo install -o ${STACK_USER} -g $USER_GROUP -d $node_device
+        sudo install -o ${STACK_USER} -g $user_group -d $drive
+        sudo install -o ${STACK_USER} -g $user_group -d $node_device
         sudo chown -R ${STACK_USER}: ${node}
     done
 }
@@ -540,49 +558,50 @@
 
 function create_swift_accounts {
     # Defines specific passwords used by tools/create_userrc.sh
-    SWIFTUSERTEST1_PASSWORD=testing
-    SWIFTUSERTEST2_PASSWORD=testing2
-    SWIFTUSERTEST3_PASSWORD=testing3
+    local swiftusertest1_password=testing
+    local swiftusertest2_password=testing2
+    local swiftusertest3_password=testing3
 
     KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
+    local another_role=$(openstack role list | awk "/ anotherrole / { print \$2 }")
 
-    SWIFT_USER=$(get_or_create_user "swift" \
-        "$SERVICE_PASSWORD" $SERVICE_TENANT "swift@example.com")
-    get_or_add_user_role $ADMIN_ROLE $SWIFT_USER $SERVICE_TENANT
+    local swift_user=$(get_or_create_user "swift" \
+        "$SERVICE_PASSWORD" $service_tenant)
+    get_or_add_user_role $admin_role $swift_user $service_tenant
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        SWIFT_SERVICE=$(get_or_create_service "swift" \
+        local swift_service=$(get_or_create_service "swift" \
             "object-store" "Swift Service")
-        get_or_create_endpoint $SWIFT_SERVICE \
+        get_or_create_endpoint $swift_service \
             "$REGION_NAME" \
-            "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
-            "http://$SERVICE_HOST:8080" \
-            "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
+            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
+            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080" \
+            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
     fi
 
-    SWIFT_TENANT_TEST1=$(get_or_create_project swifttenanttest1)
-    die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1"
-    SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $SWIFTUSERTEST1_PASSWORD \
-        "$SWIFT_TENANT_TEST1" "test@example.com")
+    local swift_tenant_test1=$(get_or_create_project swifttenanttest1)
+    die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1"
+    SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \
+        "$swift_tenant_test1" "test@example.com")
     die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
-    get_or_add_user_role $ADMIN_ROLE $SWIFT_USER_TEST1 $SWIFT_TENANT_TEST1
+    get_or_add_user_role $admin_role $SWIFT_USER_TEST1 $swift_tenant_test1
 
-    SWIFT_USER_TEST3=$(get_or_create_user swiftusertest3 $SWIFTUSERTEST3_PASSWORD \
-        "$SWIFT_TENANT_TEST1" "test3@example.com")
-    die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3"
-    get_or_add_user_role $ANOTHER_ROLE $SWIFT_USER_TEST3 $SWIFT_TENANT_TEST1
+    local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
+        "$swift_tenant_test1" "test3@example.com")
+    die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
+    get_or_add_user_role $another_role $swift_user_test3 $swift_tenant_test1
 
-    SWIFT_TENANT_TEST2=$(get_or_create_project swifttenanttest2)
-    die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2"
+    local swift_tenant_test2=$(get_or_create_project swifttenanttest2)
+    die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2"
 
-    SWIFT_USER_TEST2=$(get_or_create_user swiftusertest2 $SWIFTUSERTEST2_PASSWORD \
-        "$SWIFT_TENANT_TEST2" "test2@example.com")
-    die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2"
-    get_or_add_user_role $ADMIN_ROLE $SWIFT_USER_TEST2 $SWIFT_TENANT_TEST2
+    local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
+        "$swift_tenant_test2" "test2@example.com")
+    die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2"
+    get_or_add_user_role $admin_role $swift_user_test2 $swift_tenant_test2
 }
 
 # init_swift() - Initialize rings
@@ -605,9 +624,9 @@
         swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
 
         for node_number in ${SWIFT_REPLICAS_SEQ}; do
-            swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
-            swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
-            swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
+            swift-ring-builder object.builder add z${node_number}-127.0.0.1:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+            swift-ring-builder container.builder add z${node_number}-127.0.0.1:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
+            swift-ring-builder account.builder add z${node_number}-127.0.0.1:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1
         done
         swift-ring-builder object.builder rebalance
         swift-ring-builder container.builder rebalance
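The $[ ... ] form being replaced throughout this file is deprecated bash arithmetic; $(( ... )) is the portable spelling. A quick sketch of the port layout the expression produces, using the CONTAINER_PORT_BASE default of 6011 set earlier:

    # Replica N listens on CONTAINER_PORT_BASE + 10 * (N - 1)
    for node_number in 1 2 3; do
        echo $(( 6011 + 10 * (node_number - 1) ))    # 6011, 6021, 6031
    done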
@@ -650,10 +669,10 @@
     if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
         restart_apache_server
         swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
-        screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server"
+        tail_log s-proxy /var/log/$APACHE_NAME/proxy-server
         if [[ ${SWIFT_REPLICAS} == 1 ]]; then
             for type in object container account; do
-                screen_it s-${type} "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/${type}-server-1"
+                tail_log s-${type} /var/log/$APACHE_NAME/${type}-server-1
             done
         fi
         return 0
@@ -666,6 +685,7 @@
     # service so we can run it in foreground in screen.  ``swift-init ...
     # {stop|restart}`` exits with '1' if no servers are running, ignore it just
     # in case
+    local todo type
     swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
     if [[ ${SWIFT_REPLICAS} == 1 ]]; then
         todo="object container account"
@@ -673,16 +693,25 @@
     for type in proxy ${todo}; do
         swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
     done
-    screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
+    if is_service_enabled tls-proxy; then
+        local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080}
+        start_tls_proxy '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT &
+    fi
+    run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
     if [[ ${SWIFT_REPLICAS} == 1 ]]; then
         for type in object container account; do
-            screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
+            run_process s-${type} "$SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
         done
     fi
+
+    if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
+        swift_configure_tempurls
+    fi
 }
 
 # stop_swift() - Stop running processes (non-screen)
 function stop_swift {
+    local type
 
     if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
         swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0
@@ -693,14 +722,21 @@
         swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
     fi
     # Dump all of the servers
-    # Maintain the iteration as screen_stop() has some desirable side-effects
+    # Maintain the iteration as stop_process() has some desirable side-effects
     for type in proxy object container account; do
-        screen_stop s-${type}
+        stop_process s-${type}
     done
     # Blast out any stragglers
     pkill -f swift-
 }
 
+function swift_configure_tempurls {
+    OS_USERNAME=swift \
+        OS_TENANT_NAME=$SERVICE_TENANT_NAME \
+        OS_PASSWORD=$SERVICE_PASSWORD \
+        swift post -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY"
+}
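A hypothetical end-to-end check of the key, assuming a python-swiftclient new enough to ship the tempurl helper:

    # In localrc:  SWIFT_ENABLE_TEMPURLS=True  SWIFT_TEMPURL_KEY=mysecret
    # After stack.sh, sign a one-hour GET URL for an illustrative object:
    swift tempurl GET 3600 /v1/AUTH_demo/container/object "$SWIFT_TEMPURL_KEY"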
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/tempest b/lib/tempest
index 59c5bbc..d677c7e 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -48,11 +48,16 @@
 TEMPEST_CONFIG_DIR=${TEMPEST_CONFIG_DIR:-$TEMPEST_DIR/etc}
 TEMPEST_CONFIG=$TEMPEST_CONFIG_DIR/tempest.conf
 TEMPEST_STATE_PATH=${TEMPEST_STATE_PATH:=$DATA_DIR/tempest}
+TEMPEST_LIB_DIR=$DEST/tempest-lib
 
 NOVA_SOURCE_DIR=$DEST/nova
 
 BUILD_INTERVAL=1
-BUILD_TIMEOUT=196
+
+# This is the timeout (in seconds) that Tempest will wait for a VM to
+# change state, spawn, be deleted, etc.  The default is 196 seconds.
+BUILD_TIMEOUT=${BUILD_TIMEOUT:-196}
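Now that the value is overridable, a slow environment can raise it from localrc, for example:

    # Hypothetical override for slow test machines
    BUILD_TIMEOUT=300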
 
 
 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}"
@@ -108,6 +113,8 @@
             image_uuid_alt="$IMAGE_UUID"
         fi
         images+=($IMAGE_UUID)
+    # TODO(stevemar): update this command to use openstackclient's `openstack image list`
+    # when it supports listing by status.
     done < <(glance image-list --status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
 
     case "${#images[*]}" in
@@ -283,11 +290,12 @@
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 
     # Compute Features
-    iniset $TEMPEST_CONFIG compute-feature-enabled api_v3 ${TEMPEST_NOVA_API_V3:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled resize True
     iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
     iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
+    iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions ${COMPUTE_API_EXTENSIONS:-"all"}
+    iniset $TEMPEST_CONFIG compute-feature-disabled api_extensions ${DISABLE_COMPUTE_API_EXTENSIONS}
 
     # Compute admin
     iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
@@ -302,14 +310,16 @@
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled api_extensions ${NETWORK_API_EXTENSIONS:-"all"}
+    iniset $TEMPEST_CONFIG network-feature-disabled api_extensions ${DISABLE_NETWORK_API_EXTENSIONS}
 
     # boto
-    iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
+    iniset $TEMPEST_CONFIG boto ec2_url "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/services/Cloud"
     iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
     iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH"
-    iniset $TEMPEST_CONFIG boto ari_manifest cirros-${CIRROS_VERSION}-x86_64-initrd.manifest.xml
-    iniset $TEMPEST_CONFIG boto ami_manifest cirros-${CIRROS_VERSION}-x86_64-blank.img.manifest.xml
-    iniset $TEMPEST_CONFIG boto aki_manifest cirros-${CIRROS_VERSION}-x86_64-vmlinuz.manifest.xml
+    iniset $TEMPEST_CONFIG boto ari_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd.manifest.xml
+    iniset $TEMPEST_CONFIG boto ami_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img.manifest.xml
+    iniset $TEMPEST_CONFIG boto aki_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz.manifest.xml
     iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type"
     iniset $TEMPEST_CONFIG boto http_socket_timeout 30
     iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
@@ -329,10 +339,10 @@
     fi
 
     # Scenario
-    iniset $TEMPEST_CONFIG scenario img_dir "$FILES/images/cirros-${CIRROS_VERSION}-x86_64-uec"
-    iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-x86_64-blank.img"
-    iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-x86_64-initrd"
-    iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-x86_64-vmlinuz"
+    iniset $TEMPEST_CONFIG scenario img_dir "$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec"
+    iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img"
+    iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd"
+    iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz"
 
     # Large Ops Number
     iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0}
@@ -342,7 +352,13 @@
     # Once Tempest retires support for icehouse this flag can be removed.
     iniset $TEMPEST_CONFIG telemetry too_slow_to_test "False"
 
+    # Object storage
+    iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis ${OBJECT_STORAGE_API_EXTENSIONS:-"all"}
+    iniset $TEMPEST_CONFIG object-storage-feature-disabled discoverable_apis ${OBJECT_STORAGE_DISABLE_API_EXTENSIONS}
+
     # Volume
+    iniset $TEMPEST_CONFIG volume-feature-enabled api_extensions ${VOLUME_API_EXTENSIONS:-"all"}
+    iniset $TEMPEST_CONFIG volume-feature-disabled api_extensions ${DISABLE_VOLUME_API_EXTENSIONS}
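These enabled/disabled pairs let a job trim what Tempest exercises; hypothetical localrc usage (extension names illustrative):

    COMPUTE_API_EXTENSIONS=all
    DISABLE_COMPUTE_API_EXTENSIONS=os-shelve,os-migrations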
     if ! is_service_enabled c-bak; then
         iniset $TEMPEST_CONFIG volume-feature-enabled backup False
     fi
@@ -354,7 +370,7 @@
     fi
 
     if [ $TEMPEST_VOLUME_DRIVER != "default" ]; then
-        iniset $TEMPEST_CONFIG volume vendor_name $TEMPEST_VOLUME_VENDOR
+        iniset $TEMPEST_CONFIG volume vendor_name "$TEMPEST_VOLUME_VENDOR"
         iniset $TEMPEST_CONFIG volume storage_protocol $TEMPEST_STORAGE_PROTOCOL
     fi
 
@@ -365,12 +381,19 @@
     # cli
     iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR
 
-    # Networking
-    iniset $TEMPEST_CONFIG network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}"
-
     # Baremetal
     if [ "$VIRT_DRIVER" = "ironic" ] ; then
         iniset $TEMPEST_CONFIG baremetal driver_enabled True
+        iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
+        iniset $TEMPEST_CONFIG compute-feature-enabled console_output False
+        iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False
+        iniset $TEMPEST_CONFIG compute-feature-enabled live_migration False
+        iniset $TEMPEST_CONFIG compute-feature-enabled pause False
+        iniset $TEMPEST_CONFIG compute-feature-enabled rescue False
+        iniset $TEMPEST_CONFIG compute-feature-enabled resize False
+        iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
+        iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
+        iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
     fi
 
     # service_available
@@ -403,16 +426,23 @@
     fi
 }
 
+# install_tempest_lib() - Collect source, prepare, and install tempest-lib
+function install_tempest_lib {
+    git_clone $TEMPEST_LIB_REPO $TEMPEST_LIB_DIR $TEMPEST_LIB_BRANCH
+    setup_develop $TEMPEST_LIB_DIR
+}
+
 # install_tempest() - Collect source and prepare
 function install_tempest {
+    install_tempest_lib
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
-    pip_install "tox<1.7"
+    pip_install tox
 }
 
 # init_tempest() - Initialize ec2 images
 function init_tempest {
-    local base_image_name=cirros-${CIRROS_VERSION}-x86_64
-    # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-x86_64-uec
+    local base_image_name=cirros-${CIRROS_VERSION}-${CIRROS_ARCH}
+    # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec
     local image_dir="$FILES/images/${base_image_name}-uec"
     local kernel="$image_dir/${base_image_name}-vmlinuz"
     local ramdisk="$image_dir/${base_image_name}-initrd"
@@ -421,12 +451,13 @@
     if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a  "$VIRT_DRIVER" != "openvz" \
         -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then
         echo "Prepare aki/ari/ami Images"
+        mkdir -p $BOTO_MATERIALS_PATH
         ( #new namespace
             # tenant:demo ; user: demo
             source $TOP_DIR/accrc/demo/demo
-            euca-bundle-image -r x86_64 -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
-            euca-bundle-image -r x86_64 -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
-            euca-bundle-image -r x86_64 -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
+            euca-bundle-image -r ${CIRROS_ARCH} -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
+            euca-bundle-image -r ${CIRROS_ARCH} -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
+            euca-bundle-image -r ${CIRROS_ARCH} -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
         ) 2>&1 </dev/null | cat
     else
         echo "Boto materials are not prepared"
diff --git a/lib/template b/lib/template
index efe5826..f77409b 100644
--- a/lib/template
+++ b/lib/template
@@ -75,13 +75,17 @@
 
 # start_XXXX() - Start running processes, including screen
 function start_XXXX {
-    # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin"
+    # The quoted command must be a single command and must not include
+    # any shell metacharacters, redirections or shell builtins.
+    # run_process XXXX "$XXXX_DIR/bin/XXXX-bin"
     :
 }
 
 # stop_XXXX() - Stop running processes (non-screen)
 function stop_XXXX {
-    # FIXME(dtroyer): stop only our screen screen window?
+    # for serv in serv-a serv-b; do
+    #     stop_process $serv
+    # done
     :
 }
 
diff --git a/lib/tls b/lib/tls
index 02906b7..15e8692 100644
--- a/lib/tls
+++ b/lib/tls
@@ -14,16 +14,21 @@
 #
 # - configure_CA
 # - init_CA
+# - cleanup_CA
 
 # - configure_proxy
 # - start_tls_proxy
 
+# - stop_tls_proxy
+# - cleanup_CA
+
 # - make_root_CA
 # - make_int_CA
 # - make_cert ca-dir cert-name "common-name" ["alt-name" ...]
 # - start_tls_proxy HOST_IP 5000 localhost 5000
 # - ensure_certificates
 # - is_ssl_enabled_service
+# - enable_mod_ssl
 
 # Defaults
 # --------
@@ -31,14 +36,9 @@
 if is_service_enabled tls-proxy; then
     # TODO(dtroyer): revisit this below after the search for HOST_IP has been done
     TLS_IP=${TLS_IP:-$SERVICE_IP}
-
-    # Set the default ``SERVICE_PROTOCOL`` for TLS
-    SERVICE_PROTOCOL=https
 fi
 
-# Make up a hostname for cert purposes
-# will be added to /etc/hosts?
-DEVSTACK_HOSTNAME=secure.devstack.org
+DEVSTACK_HOSTNAME=$(hostname -f)
 DEVSTACK_CERT_NAME=devstack-cert
 DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem
 
@@ -81,6 +81,7 @@
         return 0
     fi
 
+    local i
     for i in certs crl newcerts private; do
         mkdir -p $ca_dir/$i
     done
@@ -205,6 +206,29 @@
 
     # Create the CA bundle
     cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem
+    cat $INT_CA_DIR/ca-chain.pem >> $SSL_BUNDLE_FILE
+
+    if is_fedora; then
+        sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem
+        sudo update-ca-trust
+    elif is_ubuntu; then
+        sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt
+        sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt
+        sudo update-ca-certificates
+    fi
+}
+
+# Clean up the CA files
+# cleanup_CA
+function cleanup_CA {
+    if is_fedora; then
+        sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem
+        sudo update-ca-trust
+    elif is_ubuntu; then
+        sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt
+        sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt
+        sudo update-ca-certificates
+    fi
+
+    # Also remove the CA directories and the local server certificate
+    rm -rf "$DATA_DIR/CA" "$DEVSTACK_CERT"
 }
 
 # Create an initial server cert
@@ -231,31 +255,34 @@
     local common_name=$3
     local alt_names=$4
 
-    # Generate a signing request
-    $OPENSSL req \
-        -sha1 \
-        -newkey rsa \
-        -nodes \
-        -keyout $ca_dir/private/$cert_name.key \
-        -out $ca_dir/$cert_name.csr \
-        -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}"
+    # Only generate the certificate if it doesn't exist yet on the disk
+    if [ ! -r "$ca_dir/$cert_name.crt" ]; then
+        # Generate a signing request
+        $OPENSSL req \
+            -sha1 \
+            -newkey rsa \
+            -nodes \
+            -keyout $ca_dir/private/$cert_name.key \
+            -out $ca_dir/$cert_name.csr \
+            -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}"
 
-    if [[ -z "$alt_names" ]]; then
-        alt_names="DNS:${common_name}"
-    else
-        alt_names="DNS:${common_name},${alt_names}"
+        if [[ -z "$alt_names" ]]; then
+            alt_names="DNS:${common_name}"
+        else
+            alt_names="DNS:${common_name},${alt_names}"
+        fi
+
+        # Sign the request valid for 1 year
+        SUBJECT_ALT_NAME="$alt_names" \
+        $OPENSSL ca -config $ca_dir/signing.conf \
+            -extensions req_extensions \
+            -days 365 \
+            -notext \
+            -in $ca_dir/$cert_name.csr \
+            -out $ca_dir/$cert_name.crt \
+            -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \
+            -batch
     fi
-
-    # Sign the request valid for 1 year
-    SUBJECT_ALT_NAME="$alt_names" \
-    $OPENSSL ca -config $ca_dir/signing.conf \
-        -extensions req_extensions \
-        -days 365 \
-        -notext \
-        -in $ca_dir/$cert_name.csr \
-        -out $ca_dir/$cert_name.crt \
-        -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \
-        -batch
 }
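An example call matching the signature documented in the file header (make_cert ca-dir cert-name "common-name" ["alt-name" ...]); with the guard above, re-running it is a no-op while the .crt exists:

    # Hypothetical invocation; extra alt-names carry their own DNS:/IP: prefix
    make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "IP:$SERVICE_IP"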
 
 
@@ -270,23 +297,25 @@
     create_CA_config $ca_dir 'Intermediate CA'
     create_signing_config $ca_dir
 
-    # Create a signing certificate request
-    $OPENSSL req -config $ca_dir/ca.conf \
-        -sha1 \
-        -newkey rsa \
-        -nodes \
-        -keyout $ca_dir/private/cacert.key \
-        -out $ca_dir/cacert.csr \
-        -outform PEM
+    if [ ! -r "$ca_dir/cacert.pem" ]; then
+        # Create a signing certificate request
+        $OPENSSL req -config $ca_dir/ca.conf \
+            -sha1 \
+            -newkey rsa \
+            -nodes \
+            -keyout $ca_dir/private/cacert.key \
+            -out $ca_dir/cacert.csr \
+            -outform PEM
 
-    # Sign the intermediate request valid for 1 year
-    $OPENSSL ca -config $signing_ca_dir/ca.conf \
-        -extensions ca_extensions \
-        -days 365 \
-        -notext \
-        -in $ca_dir/cacert.csr \
-        -out $ca_dir/cacert.pem \
-        -batch
+        # Sign the intermediate request valid for 1 year
+        $OPENSSL ca -config $signing_ca_dir/ca.conf \
+            -extensions ca_extensions \
+            -days 365 \
+            -notext \
+            -in $ca_dir/cacert.csr \
+            -out $ca_dir/cacert.pem \
+            -batch
+    fi
 }
 
 # Make a root CA to sign other CAs
@@ -320,7 +349,11 @@
 #
 # Uses global ``SSL_ENABLED_SERVICES``
 function is_ssl_enabled_service {
-    services=$@
+    local services=$@
+    local service=""
+    if [ "$USE_SSL" == "False" ]; then
+        return 1
+    fi
     for service in ${services}; do
         [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
     done
@@ -335,8 +368,12 @@
 # The function expects to find a certificate, key and CA certificate in the
 # variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For
 # example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and
-# KEYSTONE_SSL_CA. If it does not find these certificates the program will
-# quit.
+# KEYSTONE_SSL_CA.
+#
+# If it does not find these certificates then the devstack-issued server
+# certificate, key and CA certificate will be associated with the service.
+#
+# If only some of the variables are provided then the function will quit.
 function ensure_certificates {
     local service=$1
 
@@ -348,7 +385,15 @@
     local key=${!key_var}
     local ca=${!ca_var}
 
-    if [[ -z "$cert" || -z "$key" || -z "$ca" ]]; then
+    if [[ -z "$cert" && -z "$key" && -z "$ca" ]]; then
+        local cert="$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt"
+        local key="$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key"
+        local ca="$INT_CA_DIR/ca-chain.pem"
+        eval ${service}_SSL_CERT=\$cert
+        eval ${service}_SSL_KEY=\$key
+        eval ${service}_SSL_CA=\$ca
+        return # the CA certificate is already in the bundle
+    elif [[ -z "$cert" || -z "$key" || -z "$ca" ]]; then
         die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \
                     "variable to enable SSL for ${service}"
     fi
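Explicit per-service overrides still win; the fallback only applies when all three variables are unset. A hypothetical localrc example for keystone (paths illustrative):

    KEYSTONE_SSL_CERT=/etc/ssl/certs/keystone.crt
    KEYSTONE_SSL_KEY=/etc/ssl/private/keystone.key
    KEYSTONE_SSL_CA=/etc/ssl/certs/my-ca-chain.pem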
@@ -356,6 +401,21 @@
     cat $ca >> $SSL_BUNDLE_FILE
 }
 
+# Enable the mod_ssl plugin in Apache
+function enable_mod_ssl {
+    echo "Enabling mod_ssl"
+
+    if is_ubuntu; then
+        sudo a2enmod ssl
+    elif is_fedora; then
+        # Fedora enables mod_ssl by default
+        :
+    fi
+    if ! sudo `which httpd || which apache2ctl` -M | grep -w -q ssl_module; then
+        die $LINENO "mod_ssl is not enabled in apache2/httpd, please check for it manually and run stack.sh again"
+    fi
+}
+
 
 # Proxy Functions
 # ===============
@@ -372,6 +432,22 @@
 }
 
 
+# Cleanup Functions
+# =================
+
+
+# Stops all stud processes. This should be done only after all services
+# using tls configuration are down.
+function stop_tls_proxy {
+    killall stud
+}
+
+
 # Tell emacs to use shell-script-mode
 ## Local variables:
 ## mode: shell-script
diff --git a/lib/trove b/lib/trove
index 2552745..1d1b5f4 100644
--- a/lib/trove
+++ b/lib/trove
@@ -76,21 +76,20 @@
 # service              trove     admin        # if enabled
 
 function create_trove_accounts {
-    # Trove
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local service_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
     if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then
 
-        TROVE_USER=$(get_or_create_user "trove" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT "trove@example.com")
-        get_or_add_user_role $SERVICE_ROLE $TROVE_USER $SERVICE_TENANT
+        local trove_user=$(get_or_create_user "trove" \
+            "$SERVICE_PASSWORD" $service_tenant)
+        get_or_add_user_role $service_role $trove_user $service_tenant
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            TROVE_SERVICE=$(get_or_create_service "trove" \
+            local trove_service=$(get_or_create_service "trove" \
                 "database" "Trove Service")
-            get_or_create_endpoint $TROVE_SERVICE \
+            get_or_create_endpoint $trove_service \
                 "$REGION_NAME" \
                 "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
                 "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
@@ -129,12 +128,7 @@
     cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini
     TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini
 
-    iniset $TROVE_API_PASTE_INI filter:authtoken identity_uri $KEYSTONE_AUTH_URI
-    iniset $TROVE_API_PASTE_INI filter:authtoken cafile $KEYSTONE_SSL_CA
-    iniset $TROVE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $TROVE_API_PASTE_INI filter:authtoken admin_user trove
-    iniset $TROVE_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
-    iniset $TROVE_API_PASTE_INI filter:authtoken signing_dir $TROVE_AUTH_CACHE_DIR
+    configure_auth_token_middleware $TROVE_API_PASTE_INI trove $TROVE_AUTH_CACHE_DIR filter:authtoken
 
     # (Re)create trove conf files
     rm -f $TROVE_CONF_DIR/trove.conf
@@ -145,6 +139,8 @@
     iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove`
     iniset $TROVE_CONF_DIR/trove.conf DEFAULT default_datastore $TROVE_DATASTORE_TYPE
     setup_trove_logging $TROVE_CONF_DIR/trove.conf
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT trove_api_workers "$API_WORKERS"
+
 
     # (Re)create trove taskmanager conf file if needed
     if is_service_enabled tr-tmgr; then
@@ -180,6 +176,7 @@
     iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
     iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
     iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT control_exchange trove
+    iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT ignore_users os_admin
     iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_dir /tmp/
     iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_file trove-guestagent.log
     setup_trove_logging $TROVE_CONF_DIR/trove-guestagent.conf
@@ -203,10 +200,21 @@
     # Initialize the trove database
     $TROVE_BIN_DIR/trove-manage db_sync
 
-    # Upload the trove-guest image to glance
-    TROVE_GUEST_IMAGE_ID=$(upload_image $TROVE_GUEST_IMAGE_URL $TOKEN | grep ' id ' | get_field 2)
+    # If no guest image is specified, skip remaining setup
+    [ -z "$TROVE_GUEST_IMAGE_URL" ] && return 0
 
-    # Initialize appropriate datastores / datastore versions
+    # Find the glance id for the trove guest image
+    # The image is uploaded by stack.sh -- see $IMAGE_URLS handling
+    GUEST_IMAGE_NAME=$(basename "$TROVE_GUEST_IMAGE_URL")
+    GUEST_IMAGE_NAME=${GUEST_IMAGE_NAME%.*}
+    TROVE_GUEST_IMAGE_ID=$(openstack --os-token $TOKEN --os-url http://$GLANCE_HOSTPORT image list | grep "${GUEST_IMAGE_NAME}" | get_field 1)
+    if [ -z "$TROVE_GUEST_IMAGE_ID" ]; then
+        # If no glance id is found, skip remaining setup
+        echo "Datastore ${TROVE_DATASTORE_TYPE} will not be created: guest image ${GUEST_IMAGE_NAME} not found."
+        return 1
+    fi
+
+    # Now that we have the guest image id, initialize appropriate datastores / datastore versions
     $TROVE_BIN_DIR/trove-manage datastore_update "$TROVE_DATASTORE_TYPE" ""
     $TROVE_BIN_DIR/trove-manage datastore_version_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" "$TROVE_DATASTORE_TYPE" \
         "$TROVE_GUEST_IMAGE_ID" "$TROVE_DATASTORE_PACKAGE" 1
@@ -217,16 +225,17 @@
 
 # start_trove() - Start running processes, including screen
 function start_trove {
-    screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1"
-    screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1"
-    screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1"
+    run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug"
+    run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug"
+    run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug"
 }
 
 # stop_trove() - Stop running processes
 function stop_trove {
     # Kill the trove screen windows
+    local serv
     for serv in tr-api tr-tmgr tr-cond; do
-        screen_stop $serv
+        stop_process $serv
     done
 }
 
diff --git a/lib/zaqar b/lib/zaqar
new file mode 100644
index 0000000..93b727e
--- /dev/null
+++ b/lib/zaqar
@@ -0,0 +1,208 @@
+# lib/zaqar
+# Install and start **Zaqar** service
+
+# To enable a minimal set of Zaqar services, add the following to localrc:
+#
+#     enable_service zaqar-server
+#
+# Dependencies:
+# - functions
+# - OS_AUTH_URL for auth in api
+# - DEST set to the destination directory
+# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api
+# - STACK_USER service user
+
+# stack.sh
+# ---------
+# install_zaqar
+# configure_zaqar
+# init_zaqar
+# start_zaqar
+# stop_zaqar
+# cleanup_zaqar
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+ZAQAR_DIR=$DEST/zaqar
+ZAQARCLIENT_DIR=$DEST/python-zaqarclient
+ZAQAR_CONF_DIR=/etc/zaqar
+ZAQAR_CONF=$ZAQAR_CONF_DIR/zaqar.conf
+ZAQAR_API_LOG_DIR=/var/log/zaqar
+ZAQAR_API_LOG_FILE=$ZAQAR_API_LOG_DIR/queues.log
+ZAQAR_AUTH_CACHE_DIR=${ZAQAR_AUTH_CACHE_DIR:-/var/cache/zaqar}
+
+# Support potential entry-points console scripts
+ZAQAR_BIN_DIR=$(get_python_exec_prefix)
+
+# Set up database backend
+ZAQAR_BACKEND=${ZAQAR_BACKEND:-mongodb}
+
+
+# Set Zaqar repository
+ZAQAR_REPO=${ZAQAR_REPO:-${GIT_BASE}/openstack/zaqar.git}
+ZAQAR_BRANCH=${ZAQAR_BRANCH:-master}
+
+# Set client library repository
+ZAQARCLIENT_REPO=${ZAQARCLIENT_REPO:-${GIT_BASE}/openstack/python-zaqarclient.git}
+ZAQARCLIENT_BRANCH=${ZAQARCLIENT_BRANCH:-master}
+
+# Set Zaqar Connection Info
+ZAQAR_SERVICE_HOST=${ZAQAR_SERVICE_HOST:-$SERVICE_HOST}
+ZAQAR_SERVICE_PORT=${ZAQAR_SERVICE_PORT:-8888}
+ZAQAR_SERVICE_PROTOCOL=${ZAQAR_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
+# Tell Tempest this project is present
+TEMPEST_SERVICES+=,zaqar
+
+
+# Functions
+# ---------
+
+# Test if any Zaqar services are enabled
+# is_zaqar_enabled
+function is_zaqar_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"zaqar-" ]] && return 0
+    return 1
+}
+
+# cleanup_zaqar() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_zaqar {
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo zaqar --eval 'db.dropDatabase();'; do sleep 1; done"; then
+        die $LINENO "Mongo DB did not start"
+    else
+        local full_version=$(mongo zaqar --eval 'db.dropDatabase();')
+        local mongo_version=$(echo $full_version | cut -d' ' -f4)
+        local required_mongo_version='2.2'
+        if [[ $mongo_version < $required_mongo_version ]]; then
+            die $LINENO "Zaqar needs Mongo DB version >= 2.2 to run."
+        fi
+    fi
+}
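The cut -d' ' -f4 above leans on the mongo shell's banner line, and note that [[ ... < ... ]] is a lexicographic string compare, adequate only while version components stay single-digit. A sketch with a typical banner:

    # mongo --eval output starts with e.g. "MongoDB shell version: 2.4.9"
    full_version="MongoDB shell version: 2.4.9"
    echo "$full_version" | cut -d' ' -f4    # -> 2.4.9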
+
+# configure_zaqarclient() - Set config files, create data dirs, etc
+function configure_zaqarclient {
+    setup_develop $ZAQARCLIENT_DIR
+}
+
+# configure_zaqar() - Set config files, create data dirs, etc
+function configure_zaqar {
+    setup_develop $ZAQAR_DIR
+
+    [ ! -d $ZAQAR_CONF_DIR ] && sudo mkdir -m 755 -p $ZAQAR_CONF_DIR
+    sudo chown $USER $ZAQAR_CONF_DIR
+
+    [ ! -d $ZAQAR_API_LOG_DIR ] &&  sudo mkdir -m 755 -p $ZAQAR_API_LOG_DIR
+    sudo chown $USER $ZAQAR_API_LOG_DIR
+
+    iniset $ZAQAR_CONF DEFAULT verbose True
+    iniset $ZAQAR_CONF DEFAULT use_syslog $SYSLOG
+    iniset $ZAQAR_CONF DEFAULT log_file $ZAQAR_API_LOG_FILE
+    iniset $ZAQAR_CONF 'drivers:transport:wsgi' bind $ZAQAR_SERVICE_HOST
+
+    configure_auth_token_middleware $ZAQAR_CONF zaqar $ZAQAR_AUTH_CACHE_DIR
+
+    if [ "$ZAQAR_BACKEND" = 'mysql' ] || [ "$ZAQAR_BACKEND" = 'postgresql' ] ; then
+        iniset $ZAQAR_CONF drivers storage sqlalchemy
+        iniset $ZAQAR_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url zaqar`
+    elif [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then
+        iniset $ZAQAR_CONF  drivers storage mongodb
+        iniset $ZAQAR_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/zaqar
+        configure_mongodb
+        cleanup_zaqar
+    fi
+}
+
+function configure_mongodb {
+    # Set nssize to 2GB. This increases the number of namespaces supported
+    # per database.
+    if is_ubuntu; then
+        sudo sed -i -e "
+            s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1|
+            s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047|
+        " /etc/mongodb.conf
+        restart_service mongodb
+    elif is_fedora; then
+        sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
+        restart_service mongod
+    fi
+}
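A minimal reproduction of what the two Ubuntu sed expressions do to a stock commented setting (sample line hypothetical):

    echo '#nssize = 16' | sed -e '
        s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*$\)|\1|
        s|^\(nssize[ \t]*=[ \t]*\).*$|\1 2047|'
    # prints "nssize =  2047", i.e. uncommented and pinned to 2047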
+
+# init_zaqar() - Initialize the auth cache directory
+function init_zaqar {
+    # Create cache dir
+    sudo mkdir -p $ZAQAR_AUTH_CACHE_DIR
+    sudo chown $STACK_USER $ZAQAR_AUTH_CACHE_DIR
+    rm -f $ZAQAR_AUTH_CACHE_DIR/*
+}
+
+# install_zaqar() - Collect source and prepare
+function install_zaqar {
+    git_clone $ZAQAR_REPO $ZAQAR_DIR $ZAQAR_BRANCH
+    setup_develop $ZAQAR_DIR
+}
+
+# install_zaqarclient() - Collect source and prepare
+function install_zaqarclient {
+    git_clone $ZAQARCLIENT_REPO $ZAQARCLIENT_DIR $ZAQARCLIENT_BRANCH
+    setup_develop $ZAQARCLIENT_DIR
+}
+
+# start_zaqar() - Start running processes, including screen
+function start_zaqar {
+    if [[ "$USE_SCREEN" = "False" ]]; then
+        run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF --daemon"
+    else
+        run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF"
+    fi
+
+    echo "Waiting for Zaqar to start..."
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT/v1/health; do sleep 1; done"; then
+        die $LINENO "Zaqar did not start"
+    fi
+}
+
+# stop_zaqar() - Stop running processes
+function stop_zaqar {
+    local serv
+    # Kill the zaqar screen windows
+    for serv in zaqar-server; do
+        screen -S $SCREEN_NAME -p $serv -X kill
+    done
+}
+
+function create_zaqar_accounts {
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
+
+    local zaqar_user=$(get_or_create_user "zaqar" \
+        "$SERVICE_PASSWORD" $service_tenant)
+    get_or_add_user_role $admin_role $zaqar_user $service_tenant
+
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+
+        local zaqar_service=$(get_or_create_service "zaqar" \
+            "messaging" "Zaqar Service")
+        get_or_create_endpoint $zaqar_service \
+            "$REGION_NAME" \
+            "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
+            "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \
+            "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT"
+    fi
+
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/samples/local.conf b/samples/local.conf
index c8126c2..20c5892 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -24,8 +24,10 @@
 # While ``stack.sh`` is happy to run without ``localrc``, devlife is better when
 # there are a few minimal variables set:
 
-# If the ``*_PASSWORD`` variables are not set here you will be prompted to enter
-# values for them by ``stack.sh`` and they will be added to ``local.conf``.
+# If the ``SERVICE_TOKEN`` and ``*_PASSWORD`` variables are not set
+# here you will be prompted to enter values for them by ``stack.sh``
+# and they will be added to ``local.conf``.
+SERVICE_TOKEN=azertytoken
 ADMIN_PASSWORD=nomoresecrete
 MYSQL_PASSWORD=stackdb
 RABBIT_PASSWORD=stackqueue
diff --git a/stack.sh b/stack.sh
index 5f12a80..2a6a0c4 100755
--- a/stack.sh
+++ b/stack.sh
@@ -40,6 +40,45 @@
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
+# Sanity Checks
+# -------------
+
+# Clean up last environment var cache
+if [[ -r $TOP_DIR/.stackenv ]]; then
+    rm $TOP_DIR/.stackenv
+fi
+
+# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
+# templates and other useful files in the ``files`` subdirectory
+FILES=$TOP_DIR/files
+if [ ! -d $FILES ]; then
+    die $LINENO "missing devstack/files"
+fi
+
+# ``stack.sh`` keeps function libraries here
+# Make sure ``$TOP_DIR/lib`` directory is present
+if [ ! -d $TOP_DIR/lib ]; then
+    die $LINENO "missing devstack/lib"
+fi
+
+# Check if run as root
+# OpenStack is designed to be run as a non-root user; Horizon will fail to run
+# as **root** since Apache will not serve content from the **root** user.
+# ``stack.sh`` must not be run as **root**.  It aborts and suggests one course of
+# action to create a suitable user account.
+
+if [[ $EUID -eq 0 ]]; then
+    echo "You are running this script as root."
+    echo "Cut it out."
+    echo "Really."
+    echo "If you need an account to run DevStack, do this (as root, heh) to create a non-root account:"
+    echo "$TOP_DIR/tools/create-stack-user.sh"
+    exit 1
+fi
+
+# Prepare the environment
+# -----------------------
+
 # Import common functions
 source $TOP_DIR/functions
 
@@ -51,9 +90,18 @@
 # and ``DISTRO``
 GetDistro
 
+# Warn users who aren't on an explicitly supported distro, but allow them to
+# override the check and attempt installation with ``FORCE=yes ./stack.sh``
+if [[ ! ${DISTRO} =~ (precise|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6|rhel7) ]]; then
+    echo "WARNING: this script has not been tested on $DISTRO"
+    if [[ "$FORCE" != "yes" ]]; then
+        die $LINENO "If you wish to run this script anyway run with FORCE=yes"
+    fi
+fi
+
 
 # Global Settings
-# ===============
+# ---------------
 
 # Check for a ``localrc`` section embedded in ``local.conf`` and extract if
 # ``localrc`` does not already exist
@@ -74,6 +122,7 @@
     done
 fi
 
+
 # ``stack.sh`` is customizable by setting environment variables.  Override a
 # default setting via export::
 #
@@ -98,10 +147,19 @@
 # ``stackrc`` sources ``localrc`` to allow you to safely override those settings.
 
 if [[ ! -r $TOP_DIR/stackrc ]]; then
-    log_error $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
+    die $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
 fi
 source $TOP_DIR/stackrc
 
+# Check to see if we are already running DevStack
+# Note that this may fail if USE_SCREEN=False
+if type -p screen > /dev/null && screen -ls | egrep -q "[0-9]\.$SCREEN_NAME"; then
+    echo "You are already running a stack.sh session."
+    echo "To rejoin this session type 'screen -x stack'."
+    echo "To destroy this session, type './unstack.sh'."
+    exit 1
+fi
+
 
 # Local Settings
 # --------------
@@ -109,89 +167,21 @@
 # Make sure the proxy config is visible to sub-processes
 export_proxy_variables
 
-# Destination path for installation ``DEST``
-DEST=${DEST:-/opt/stack}
-
-
-# Sanity Check
-# ------------
-
-# Clean up last environment var cache
-if [[ -r $TOP_DIR/.stackenv ]]; then
-    rm $TOP_DIR/.stackenv
-fi
-
-# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
-# templates and other useful files in the ``files`` subdirectory
-FILES=$TOP_DIR/files
-if [ ! -d $FILES ]; then
-    log_error $LINENO "missing devstack/files"
-fi
-
-# ``stack.sh`` keeps function libraries here
-# Make sure ``$TOP_DIR/lib`` directory is present
-if [ ! -d $TOP_DIR/lib ]; then
-    log_error $LINENO "missing devstack/lib"
-fi
-
-# Import common services (database, message queue) configuration
-source $TOP_DIR/lib/database
-source $TOP_DIR/lib/rpc_backend
-
 # Remove services which were negated in ENABLED_SERVICES
 # using the "-" prefix (e.g., "-rabbit") instead of
 # calling disable_service().
 disable_negated_services
 
-# Warn users who aren't on an explicitly supported distro, but allow them to
-# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6|rhel7) ]]; then
-    echo "WARNING: this script has not been tested on $DISTRO"
-    if [[ "$FORCE" != "yes" ]]; then
-        die $LINENO "If you wish to run this script anyway run with FORCE=yes"
-    fi
-fi
-
 # Look for obsolete stuff
 if [[ ,${ENABLED_SERVICES}, =~ ,"swift", ]]; then
     echo "FATAL: 'swift' is not supported as a service name"
-    echo "FATAL: Use the actual swift service names to enable tham as required:"
+    echo "FATAL: Use the actual swift service names to enable them as required:"
     echo "FATAL: s-proxy s-object s-container s-account"
     exit 1
 fi
 
-# Make sure we only have one rpc backend enabled,
-# and the specified rpc backend is available on your platform.
-check_rpc_backend
-
-# Check to see if we are already running DevStack
-# Note that this may fail if USE_SCREEN=False
-if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
-    echo "You are already running a stack.sh session."
-    echo "To rejoin this session type 'screen -x stack'."
-    echo "To destroy this session, type './unstack.sh'."
-    exit 1
-fi
-
-# Set up logging level
-VERBOSE=$(trueorfalse True $VERBOSE)
-
-# root Access
-# -----------
-
-# OpenStack is designed to be run as a non-root user; Horizon will fail to run
-# as **root** since Apache will not serve content from **root** user).
-# ``stack.sh`` must not be run as **root**.  It aborts and suggests one course of
-# action to create a suitable user account.
-
-if [[ $EUID -eq 0 ]]; then
-    echo "You are running this script as root."
-    echo "Cut it out."
-    echo "Really."
-    echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:"
-    echo "$TOP_DIR/tools/create-stack-user.sh"
-    exit 1
-fi
+# Configure sudo
+# --------------
 
 # We're not **root**, make sure ``sudo`` is available
 is_package_installed sudo || install_package sudo
@@ -211,27 +201,27 @@
 sudo chown root:root $TEMPFILE
 sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
 
-# Additional repos
-# ----------------
+
+# Configure Distro Repositories
+# -----------------------------
 
 # For debian/ubuntu make apt attempt to retry network ops on its own
 if is_ubuntu; then
-    echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry
+    echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry  >/dev/null
+fi
+
+# Upstream Rackspace centos7 images have an issue where cloud-init is
+# installed via pip because there were no official packages when the
+# image was created (fix in the works).  Remove all pip packages
+# before we do anything else.
+if [[ $DISTRO = "rhel7" ]] && is_rackspace; then
+    (sudo pip freeze | xargs sudo pip uninstall -y) || true
 fi
 
 # Some distros need to add repos beyond the defaults provided by the vendor
 # to pick up required packages.
 
-# The Debian Wheezy official repositories do not contain all required packages,
-# add gplhost repository.
-if [[ "$os_VENDOR" =~ (Debian) ]]; then
-    echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list
-    echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list
-    apt_get update
-    apt_get install --force-yes gplhost-archive-keyring
-fi
-
-if [[ is_fedora && $DISTRO =~ (rhel) ]]; then
+if is_fedora && [[ $DISTRO == "rhel6" ]]; then
     # Installing Open vSwitch on RHEL requires enabling the RDO repo.
     RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-icehouse/rdo-release-icehouse.rpm"}
     RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-icehouse"}
@@ -240,10 +230,13 @@
         yum_install $RHEL6_RDO_REPO_RPM || \
             die $LINENO "Error installing RDO repo, cannot continue"
     fi
+fi
+
+if is_fedora && [[ $DISTRO == "rhel6" || $DISTRO == "rhel7" ]]; then
     # RHEL requires EPEL for many OpenStack dependencies
-    if [[ $DISTRO =~ (rhel7) ]]; then
-        EPEL_RPM=${RHEL7_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/beta/7/x86_64/epel-release-7-0.2.noarch.rpm"}
-    else
+    if [[ $DISTRO == "rhel7" ]]; then
+        EPEL_RPM=${RHEL7_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/beta/7/x86_64/epel-release-7-1.noarch.rpm"}
+    elif [[ $DISTRO == "rhel6" ]]; then
         EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
     fi
     if ! sudo yum repolist enabled epel | grep -q 'epel'; then
@@ -254,17 +247,20 @@
 
     # ... and also optional to be enabled
     is_package_installed yum-utils || install_package yum-utils
-    if [[ $DISTRO =~ (rhel7) ]]; then
+    if [[ $DISTRO == "rhel7" ]]; then
         OPTIONAL_REPO=rhel-7-server-optional-rpms
-    else
+    elif [[ $DISTRO == "rhel6" ]]; then
         OPTIONAL_REPO=rhel-6-server-optional-rpms
     fi
     sudo yum-config-manager --enable ${OPTIONAL_REPO}
-
 fi
 
-# Filesystem setup
-# ----------------
+
+# Configure Target Directories
+# ----------------------------
+
+# Destination path for installation ``DEST``
+DEST=${DEST:-/opt/stack}
 
 # Create the destination directory and ensure it is writable by the user
 # and read/executable by everybody for daemons (e.g. apache run for horizon)
@@ -275,6 +271,12 @@
 # a basic test for $DEST path permissions (fatal on error unless skipped)
 check_path_perm_sanity ${DEST}
 
+# Destination path for service data
+DATA_DIR=${DATA_DIR:-${DEST}/data}
+sudo mkdir -p $DATA_DIR
+safe_chown -R $STACK_USER $DATA_DIR
+
+# Configure proper hostname
 # Certain services such as rabbitmq require that the local hostname resolves
 # correctly.  Make sure it exists in /etc/hosts so that is always true.
 LOCAL_HOSTNAME=`hostname -s`
@@ -282,217 +284,13 @@
     sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts
 fi
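For example, if ``hostname -s`` returns ``devstack1`` (a hypothetical name) and that name is absent from ``/etc/hosts``, the sed above rewrites the loopback entry to:

    127.0.0.1 localhost devstack1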
 
-# Destination path for service data
-DATA_DIR=${DATA_DIR:-${DEST}/data}
-sudo mkdir -p $DATA_DIR
-safe_chown -R $STACK_USER $DATA_DIR
 
-
-# Common Configuration
-# --------------------
-
-# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
-# Internet access. ``stack.sh`` must have been previously run with Internet
-# access to install prerequisites and fetch repositories.
-OFFLINE=`trueorfalse False $OFFLINE`
-
-# Set ``ERROR_ON_CLONE`` to ``True`` to configure ``stack.sh`` to exit if
-# the destination git repository does not exist during the ``git_clone``
-# operation.
-ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE`
-
-# Whether to enable the debug log level in OpenStack services
-ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL`
-
-# Set fixed and floating range here so we can make sure not to use addresses
-# from either range when attempting to guess the IP to use for the host.
-# Note that setting FIXED_RANGE may be necessary when running DevStack
-# in an OpenStack cloud that uses either of these address ranges internally.
-FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
-FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
-FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
-
-HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP")
-if [ "$HOST_IP" == "" ]; then
-    die $LINENO "Could not determine host ip address.  See local.conf for suggestions on setting HOST_IP."
-fi
-
-# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
-SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
-
-# Allow the use of an alternate protocol (such as https) for service endpoints
-SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
-
-# Configure services to use syslog instead of writing to individual log files
-SYSLOG=`trueorfalse False $SYSLOG`
-SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
-SYSLOG_PORT=${SYSLOG_PORT:-516}
-
-# for DSTAT logging
-DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"}
-
-# Use color for logging output (only available if syslog is not used)
-LOG_COLOR=`trueorfalse True $LOG_COLOR`
-
-# Service startup timeout
-SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
-
-# Reset the bundle of CA certificates
-SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem"
-rm -f $SSL_BUNDLE_FILE
-
-
-# Configure Projects
-# ==================
-
-# Import apache functions
-source $TOP_DIR/lib/apache
-
-# Import TLS functions
-source $TOP_DIR/lib/tls
-
-# Source project function libraries
-source $TOP_DIR/lib/infra
-source $TOP_DIR/lib/oslo
-source $TOP_DIR/lib/stackforge
-source $TOP_DIR/lib/horizon
-source $TOP_DIR/lib/keystone
-source $TOP_DIR/lib/glance
-source $TOP_DIR/lib/nova
-source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/swift
-source $TOP_DIR/lib/ceilometer
-source $TOP_DIR/lib/heat
-source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/baremetal
-source $TOP_DIR/lib/ldap
-
-# Extras Source
-# --------------
-
-# Phase: source
-if [[ -d $TOP_DIR/extras.d ]]; then
-    for i in $TOP_DIR/extras.d/*.sh; do
-        [[ -r $i ]] && source $i source
-    done
-fi
-
-# Set the destination directories for other OpenStack projects
-OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
-
-# Interactive Configuration
-# -------------------------
-
-# Do all interactive config up front before the logging spew begins
-
-# Generic helper to configure passwords
-function read_password {
-    XTRACE=$(set +o | grep xtrace)
-    set +o xtrace
-    var=$1; msg=$2
-    pw=${!var}
-
-    if [[ -f $RC_DIR/localrc ]]; then
-        localrc=$TOP_DIR/localrc
-    else
-        localrc=$TOP_DIR/.localrc.auto
-    fi
-
-    # If the password is not defined yet, proceed to prompt user for a password.
-    if [ ! $pw ]; then
-        # If there is no localrc file, create one
-        if [ ! -e $localrc ]; then
-            touch $localrc
-        fi
-
-        # Presumably if we got this far it can only be that our localrc is missing
-        # the required password.  Prompt user for a password and write to localrc.
-        echo ''
-        echo '################################################################################'
-        echo $msg
-        echo '################################################################################'
-        echo "This value will be written to your localrc file so you don't have to enter it "
-        echo "again.  Use only alphanumeric characters."
-        echo "If you leave this blank, a random default value will be used."
-        pw=" "
-        while true; do
-            echo "Enter a password now:"
-            read -e $var
-            pw=${!var}
-            [[ "$pw" = "`echo $pw | tr -cd [:alnum:]`" ]] && break
-            echo "Invalid chars in password.  Try again:"
-        done
-        if [ ! $pw ]; then
-            pw=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 20)
-        fi
-        eval "$var=$pw"
-        echo "$var=$pw" >> $localrc
-    fi
-    $XTRACE
-}
-
-
-# Database Configuration
-
-# To select between database backends, add the following to ``localrc``:
-#
-#    disable_service mysql
-#    enable_service postgresql
-#
-# The available database backends are listed in ``DATABASE_BACKENDS`` after
-# ``lib/database`` is sourced. ``mysql`` is the default.
-
-initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
-
-
-# Queue Configuration
-
-# Rabbit connection info
-if is_service_enabled rabbit; then
-    RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST}
-    read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
-fi
-
-
-# Keystone
-
-if is_service_enabled key; then
-    # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database.  It is
-    # just a string and is not a 'real' Keystone token.
-    read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
-    # Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
-    read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
-    # Horizon currently truncates usernames and passwords at 20 characters
-    read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
-
-    # Keystone can now optionally install OpenLDAP by enabling the ``ldap``
-    # service in ``localrc`` (e.g. ``enable_service ldap``).
-    # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP``
-    # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``.  To enable the
-    # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``)
-    # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g.
-    # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``.
-
-    # only request ldap password if the service is enabled
-    if is_service_enabled ldap; then
-        read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
-    fi
-fi
-
-
-# Swift
-
-if is_service_enabled s-proxy; then
-    # We only ask for Swift Hash if we have enabled swift service.
-    # ``SWIFT_HASH`` is a random unique string for a swift cluster that
-    # can never change.
-    read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
-fi
-
-
-# Configure logging
+# Configure Logging
 # -----------------
 
+# Set up logging level
+VERBOSE=$(trueorfalse True $VERBOSE)
+
 # Draw a spinner so the user knows something is happening
 function spinner {
     local delay=0.75
@@ -533,7 +331,7 @@
     echo $@ >&3
 }
 
-if [[ is_fedora && $DISTRO =~ (rhel) ]]; then
+if is_fedora && [[ $DISTRO == "rhel6" ]]; then
     # poor old python2.6 doesn't have argparse by default, which
     # outfilter.py uses
     is_package_installed python-argparse || install_package python-argparse
@@ -612,8 +410,8 @@
 fi
 
 
-# Set Up Script Execution
-# -----------------------
+# Configure Error Traps
+# ---------------------
 
 # Kill background processes on exit
 trap exit_trap EXIT
@@ -632,7 +430,11 @@
 
     if [[ $r -ne 0 ]]; then
         echo "Error on exit"
-        ./tools/worlddump.py -d $LOGDIR
+        if [[ -z $LOGDIR ]]; then
+            $TOP_DIR/tools/worlddump.py
+        else
+            $TOP_DIR/tools/worlddump.py -d $LOGDIR
+        fi
     fi
 
     exit $r
@@ -651,7 +453,7 @@
     exit $r
 }
 
-
+# Begin trapping error exit codes
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
@@ -659,6 +461,224 @@
 set -o xtrace
 
 
+# Common Configuration
+# --------------------
+
+# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
+# Internet access. ``stack.sh`` must have been previously run with Internet
+# access to install prerequisites and fetch repositories.
+OFFLINE=`trueorfalse False $OFFLINE`
+
+# Set ``ERROR_ON_CLONE`` to ``True`` to configure ``stack.sh`` to exit if
+# the destination git repository does not exist during the ``git_clone``
+# operation.
+ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE`
+
+# Whether to enable the debug log level in OpenStack services
+ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL`
+
+# Set fixed and floating range here so we can make sure not to use addresses
+# from either range when attempting to guess the IP to use for the host.
+# Note that setting FIXED_RANGE may be necessary when running DevStack
+# in an OpenStack cloud that uses either of these address ranges internally.
+FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
+FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
+
+HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP")
+if [ "$HOST_IP" == "" ]; then
+    die $LINENO "Could not determine host ip address.  See local.conf for suggestions on setting HOST_IP."
+fi
+
+# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
+SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+
+# Configure services to use syslog instead of writing to individual log files
+SYSLOG=`trueorfalse False $SYSLOG`
+SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
+SYSLOG_PORT=${SYSLOG_PORT:-516}
+
+# Use color for logging output (only available if syslog is not used)
+LOG_COLOR=`trueorfalse True $LOG_COLOR`
+
+# Service startup timeout
+SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
+
+# Reset the bundle of CA certificates
+SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem"
+rm -f $SSL_BUNDLE_FILE
+
+# Import common services (database, message queue) configuration
+source $TOP_DIR/lib/database
+source $TOP_DIR/lib/rpc_backend
+
+# Make sure we only have one rpc backend enabled,
+# and the specified rpc backend is available on your platform.
+check_rpc_backend
+
+# Use native SSL for servers in SSL_ENABLED_SERVICES
+USE_SSL=$(trueorfalse False $USE_SSL)
+
+# Service to enable with SSL if USE_SSL is True
+SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron"
+
+if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then
+    die $LINENO "tls-proxy and SSL are mutually exclusive"
+fi
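To exercise this native-SSL path, the corresponding ``local.conf`` entry is a one-liner (sketch):

    [[local|localrc]]
    USE_SSL=True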
+
+# Configure Projects
+# ==================
+
+# Import apache functions
+source $TOP_DIR/lib/apache
+
+# Import TLS functions
+source $TOP_DIR/lib/tls
+
+# Source project function libraries
+source $TOP_DIR/lib/infra
+source $TOP_DIR/lib/oslo
+source $TOP_DIR/lib/stackforge
+source $TOP_DIR/lib/horizon
+source $TOP_DIR/lib/keystone
+source $TOP_DIR/lib/glance
+source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/ceilometer
+source $TOP_DIR/lib/heat
+source $TOP_DIR/lib/neutron
+source $TOP_DIR/lib/baremetal
+source $TOP_DIR/lib/ldap
+source $TOP_DIR/lib/dstat
+
+# Extras Source
+# --------------
+
+# Phase: source
+if [[ -d $TOP_DIR/extras.d ]]; then
+    for i in $TOP_DIR/extras.d/*.sh; do
+        [[ -r $i ]] && source $i source
+    done
+fi
+
+# Set the destination directories for other OpenStack projects
+OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
+
+# Interactive Configuration
+# -------------------------
+
+# Do all interactive config up front before the logging spew begins
+
+# Generic helper to configure passwords
+function read_password {
+    XTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    var=$1; msg=$2
+    pw=${!var}
+
+    if [[ -f $RC_DIR/localrc ]]; then
+        localrc=$TOP_DIR/localrc
+    else
+        localrc=$TOP_DIR/.localrc.auto
+    fi
+
+    # If the password is not defined yet, proceed to prompt user for a password.
+    if [ ! $pw ]; then
+        # If there is no localrc file, create one
+        if [ ! -e $localrc ]; then
+            touch $localrc
+        fi
+
+        # Presumably if we got this far it can only be that our localrc is missing
+        # the required password.  Prompt user for a password and write to localrc.
+        echo ''
+        echo '################################################################################'
+        echo $msg
+        echo '################################################################################'
+        echo "This value will be written to your localrc file so you don't have to enter it "
+        echo "again.  Use only alphanumeric characters."
+        echo "If you leave this blank, a random default value will be used."
+        pw=" "
+        while true; do
+            echo "Enter a password now:"
+            read -e $var
+            pw=${!var}
+            [[ "$pw" = "`echo $pw | tr -cd [:alnum:]`" ]] && break
+            echo "Invalid chars in password.  Try again:"
+        done
+        if [ ! $pw ]; then
+            pw=$(generate_hex_string 10)
+        fi
+        eval "$var=$pw"
+        echo "$var=$pw" >> $localrc
+    fi
+    $XTRACE
+}
+
+
+# Database Configuration
+
+# To select between database backends, add the following to ``localrc``:
+#
+#    disable_service mysql
+#    enable_service postgresql
+#
+# The available database backends are listed in ``DATABASE_BACKENDS`` after
+# ``lib/database`` is sourced. ``mysql`` is the default.
+
+initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
+
+
+# Queue Configuration
+
+# Rabbit connection info
+if is_service_enabled rabbit; then
+    RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST}
+    read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
+fi
+
+
+# Keystone
+
+if is_service_enabled key; then
+    # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database.  It is
+    # just a string and is not a 'real' Keystone token.
+    read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
+    # Services authenticate to Identity with servicename/``SERVICE_PASSWORD``
+    read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
+    # Horizon currently truncates usernames and passwords at 20 characters
+    read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
+
+    # Keystone can now optionally install OpenLDAP by enabling the ``ldap``
+    # service in ``localrc`` (e.g. ``enable_service ldap``).
+    # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP``
+    # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``.  To enable the
+    # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``)
+    # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g.
+    # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``.
+
+    # only request ldap password if the service is enabled
+    if is_service_enabled ldap; then
+        read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
+    fi
+fi
+
+
+# Swift
+
+if is_service_enabled s-proxy; then
+    # We only ask for Swift Hash if we have enabled swift service.
+    # ``SWIFT_HASH`` is a random unique string for a swift cluster that
+    # can never change.
+    read_password SWIFT_HASH "ENTER A RANDOM SWIFT HASH."
+
+    if [[ -z "$SWIFT_TEMPURL_KEY" ]] && [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
+        read_password SWIFT_TEMPURL_KEY "ENTER A KEY FOR SWIFT TEMPURLS."
+    fi
+fi
+
+
 # Install Packages
 # ================
 
@@ -671,11 +691,11 @@
 
 # Configure an appropriate python environment
 if [[ "$OFFLINE" != "True" ]]; then
-    $TOP_DIR/tools/install_pip.sh
+    PYPI_ALTERNATIVE_URL=$PYPI_ALTERNATIVE_URL $TOP_DIR/tools/install_pip.sh
 fi
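``PYPI_ALTERNATIVE_URL`` is passed through to ``install_pip.sh``; presumably it names an alternate package index to use when the default PyPI is unreachable. A usage sketch with a hypothetical mirror:

    PYPI_ALTERNATIVE_URL=http://pypi.example.com/simple ./stack.sh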
 
-# Do the ugly hacks for borken packages and distros
-$TOP_DIR/tools/fixup_stuff.sh
+# Do the ugly hacks for broken packages and distros
+source $TOP_DIR/tools/fixup_stuff.sh
 
 
 # Extras Pre-install
@@ -805,16 +825,16 @@
     install_ceilometer
     echo_summary "Configuring Ceilometer"
     configure_ceilometer
-    configure_ceilometerclient
 fi
 
 if is_service_enabled heat; then
     install_heat
+    install_heat_other
     cleanup_heat
     configure_heat
 fi
 
-if is_service_enabled tls-proxy; then
+if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
     configure_CA
     init_CA
     init_cert
@@ -937,12 +957,7 @@
 # -------
 
 # A better kind of sysstat, with the top process per time slice
-DSTAT_OPTS="-tcmndrylp --top-cpu-adv"
-if [[ -n ${SCREEN_LOGDIR} ]]; then
-    screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
-else
-    screen_it dstat "dstat $DSTAT_OPTS"
-fi
+start_dstat
 
 # Start Services
 # ==============
@@ -985,7 +1000,7 @@
         create_swift_accounts
     fi
 
-    if is_service_enabled heat; then
+    if is_service_enabled heat && [[ "$HEAT_STANDALONE" != "True" ]]; then
         create_heat_accounts
     fi
 
@@ -1200,16 +1215,12 @@
 
 # Create a randomized default value for the keymgr's fixed_key
 if is_service_enabled nova; then
-    FIXED_KEY=""
-    for i in $(seq 1 64); do
-        FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc);
-    done;
-    iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY"
+    iniset $NOVA_CONF keymgr fixed_key $(generate_hex_string 32)
 fi
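``generate_hex_string`` is defined in DevStack's ``functions``; a minimal sketch of such a helper, assuming it renders N random bytes as 2N hex digits, is:

    function generate_hex_string {
        local size=$1
        # Read $size random bytes and print them as lowercase hex digits
        hexdump -n "$size" -v -e '1/1 "%02x"' /dev/urandom
    }

With that behavior, ``generate_hex_string 32`` produces the same 64-character hex ``fixed_key`` the removed ``bc`` loop built one digit at a time.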
 
 if is_service_enabled zeromq; then
     echo_summary "Starting zermomq receiver"
-    screen_it zeromq "cd $NOVA_DIR && $OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
+    run_process zeromq "$OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
 fi
 
 # Launch the nova-api and wait for it to answer before continuing
@@ -1269,6 +1280,10 @@
     init_heat
     echo_summary "Starting Heat"
     start_heat
+    if [ "$HEAT_CREATE_TEST_IMAGE" = "True" ]; then
+        echo_summary "Building Heat functional test image"
+        build_heat_functional_test_image
+    fi
 fi
 
 
@@ -1286,6 +1301,10 @@
         USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE"
     fi
 
+    if [[ "$HEAT_STANDALONE" = "True" ]]; then
+        USERRC_PARAMS="$USERRC_PARAMS --heat-url http://$HEAT_API_HOST:$HEAT_API_PORT/v1"
+    fi
+
     $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS
 fi
 
@@ -1313,7 +1332,7 @@
     fi
     # ensure callback daemon is running
     sudo pkill nova-baremetal-deploy-helper || true
-    screen_it baremetal "cd ; nova-baremetal-deploy-helper"
+    run_process baremetal "nova-baremetal-deploy-helper"
 fi
 
 # Save some values we generated for later use
@@ -1408,120 +1427,55 @@
     echo_summary "WARNING: $DEPRECATED_TEXT"
 fi
 
-# TODO(dtroyer): Remove EXTRA_OPTS after stable/icehouse branch is cut
-# Specific warning for deprecated configs
-if [[ -n "$EXTRA_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: EXTRA_OPTS is used"
-    echo "You are using EXTRA_OPTS to pass configuration into nova.conf."
-    echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
-    echo "EXTRA_OPTS will be removed early in the Juno development cycle"
-    echo "
-[[post-config|\$NOVA_CONF]]
-[DEFAULT]
-"
-    for I in "${EXTRA_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-# TODO(dtroyer): Remove EXTRA_BAREMETAL_OPTS after stable/icehouse branch is cut
-if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: EXTRA_BAREMETAL_OPTS is used"
-    echo "You are using EXTRA_BAREMETAL_OPTS to pass configuration into nova.conf."
-    echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
-    echo "EXTRA_BAREMETAL_OPTS will be removed early in the Juno development cycle"
-    echo "
-[[post-config|\$NOVA_CONF]]
-[baremetal]
-"
-    for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-# TODO(dtroyer): Remove Q_AGENT_EXTRA_AGENT_OPTS after stable/juno branch is cut
-if [[ -n "$Q_AGENT_EXTRA_AGENT_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_AGENT_EXTRA_AGENT_OPTS is used"
-    echo "You are using Q_AGENT_EXTRA_AGENT_OPTS to pass configuration into $NEUTRON_CONF."
-    echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
-    echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle"
-    echo "
+if is_service_enabled neutron; then
+    # TODO(dtroyer): Remove Q_AGENT_EXTRA_AGENT_OPTS after stable/juno branch is cut
+    if [[ -n "$Q_AGENT_EXTRA_AGENT_OPTS" ]]; then
+        echo ""
+        echo_summary "WARNING: Q_AGENT_EXTRA_AGENT_OPTS is used"
+        echo "You are using Q_AGENT_EXTRA_AGENT_OPTS to pass configuration into $NEUTRON_CONF."
+        echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
+        echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle"
+        echo "
 [[post-config|/\$Q_PLUGIN_CONF_FILE]]
 [DEFAULT]
 "
-    for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
+        for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
+            # Replace the first '=' with ' ' for iniset syntax
+            echo ${I}
+        done
+    fi
 
-# TODO(dtroyer): Remove Q_AGENT_EXTRA_SRV_OPTS after stable/juno branch is cut
-if [[ -n "$Q_AGENT_EXTRA_SRV_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_AGENT_EXTRA_SRV_OPTS is used"
-    echo "You are using Q_AGENT_EXTRA_SRV_OPTS to pass configuration into $NEUTRON_CONF."
-    echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
-    echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle"
-    echo "
+    # TODO(dtroyer): Remove Q_AGENT_EXTRA_SRV_OPTS after stable/juno branch is cut
+    if [[ -n "$Q_AGENT_EXTRA_SRV_OPTS" ]]; then
+        echo ""
+        echo_summary "WARNING: Q_AGENT_EXTRA_SRV_OPTS is used"
+        echo "You are using Q_AGENT_EXTRA_SRV_OPTS to pass configuration into $NEUTRON_CONF."
+        echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
+        echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle"
+        echo "
 [[post-config|/\$Q_PLUGIN_CONF_FILE]]
 [DEFAULT]
 "
-    for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
+        for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do
+            # Replace the first '=' with ' ' for iniset syntax
+            echo ${I}
+        done
+    fi
 fi
 
-# TODO(dtroyer): Remove Q_DHCP_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
-if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used"
-    echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE."
-    echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:"
-    echo "Q_DHCP_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
-    echo "
-[[post-config|/\$Q_DHCP_CONF_FILE]]
-[DEFAULT]
-"
-    for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-# TODO(dtroyer): Remove Q_SRV_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
-if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then
-    echo ""
-    echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used"
-    echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF."
-    echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
-    echo "Q_SRV_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
-    echo "
-[[post-config|\$NEUTRON_CONF]]
-[DEFAULT]
-"
-    for I in "${Q_SRV_EXTRA_DEFAULT_OPTS[@]}"; do
-        # Replace the first '=' with ' ' for iniset syntax
-        echo ${I}
-    done
-fi
-
-# TODO(dtroyer): Remove CINDER_MULTI_LVM_BACKEND after stable/juno branch is cut
-if [[ "$CINDER_MULTI_LVM_BACKEND" = "True" ]]; then
-    echo ""
-    echo_summary "WARNING: CINDER_MULTI_LVM_BACKEND is used"
-    echo "You are using CINDER_MULTI_LVM_BACKEND to configure Cinder's multiple LVM backends"
-    echo "Please convert that configuration in local.conf to use CINDER_ENABLED_BACKENDS."
-    echo "CINDER_ENABLED_BACKENDS will be removed early in the 'K' development cycle"
-    echo "
+if is_service_enabled cinder; then
+    # TODO(dtroyer): Remove CINDER_MULTI_LVM_BACKEND after stable/juno branch is cut
+    if [[ "$CINDER_MULTI_LVM_BACKEND" = "True" ]]; then
+        echo ""
+        echo_summary "WARNING: CINDER_MULTI_LVM_BACKEND is used"
+        echo "You are using CINDER_MULTI_LVM_BACKEND to configure Cinder's multiple LVM backends"
+        echo "Please convert that configuration in local.conf to use CINDER_ENABLED_BACKENDS."
+        echo "CINDER_MULTI_LVM_BACKEND will be removed early in the 'K' development cycle"
+        echo "
 [[local|localrc]]
 CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1,lvm:lvmdriver-2
 "
+    fi
 fi
 
 # Indicate how long this took to run (bash maintained variable ``SECONDS``)
diff --git a/stackrc b/stackrc
index 4d3e8fc..580fabf 100644
--- a/stackrc
+++ b/stackrc
@@ -52,6 +52,18 @@
     ENABLED_SERVICES+=,rabbit,tempest,mysql
 fi
 
+# SQLAlchemy supports multiple database drivers for each database server
+# type. For example, a deployer may use MySQLdb, MySQLConnector, or oursql
+# to access a MySQL database.
+#
+# When defined, this variable controls which database driver is used to
+# connect to the database server. Otherwise the default driver defined
+# for each database type is used.
+#
+# You can find the list of currently supported drivers for each database
+# type at: http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html
+# SQLALCHEMY_DATABASE_DRIVER="mysqldb"
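For illustration, the driver name becomes part of the SQLAlchemy connection URL (``dialect+driver://``), so the example setting above would yield a connection string of the form (hypothetical credentials):

    mysql+mysqldb://root:secret@127.0.0.1/nova?charset=utf8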
+
 # Global toggle for enabling services under mod_wsgi. If this is set to
 # ``True`` all services that use HTTPD + mod_wsgi as the preferred method of
 # deployment, will be deployed under Apache. If this is set to ``False`` all
@@ -124,10 +136,17 @@
 CINDERCLIENT_REPO=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
 CINDERCLIENT_BRANCH=${CINDERCLIENT_BRANCH:-master}
 
+# diskimage-builder
+DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
+DIB_BRANCH=${DIB_BRANCH:-master}
+
 # image catalog service
 GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
 GLANCE_BRANCH=${GLANCE_BRANCH:-master}
 
+GLANCE_STORE_REPO=${GLANCE_STORE_REPO:-${GIT_BASE}/openstack/glance_store.git}
+GLANCE_STORE_BRANCH=${GLANCE_STORE_BRANCH:-master}
+
 # python glance client library
 GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
 GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master}
@@ -140,6 +159,14 @@
 HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git}
 HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master}
 
+# heat-cfntools server agent
+HEAT_CFNTOOLS_REPO=${HEAT_CFNTOOLS_REPO:-${GIT_BASE}/openstack/heat-cfntools.git}
+HEAT_CFNTOOLS_BRANCH=${HEAT_CFNTOOLS_BRANCH:-master}
+
+# heat example templates and elements
+HEAT_TEMPLATES_REPO=${HEAT_TEMPLATES_REPO:-${GIT_BASE}/openstack/heat-templates.git}
+HEAT_TEMPLATES_BRANCH=${HEAT_TEMPLATES_BRANCH:-master}
+
 # django powered web control panel for openstack
 HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
 HORIZON_BRANCH=${HORIZON_BRANCH:-master}
@@ -148,9 +175,11 @@
 HORIZONAUTH_REPO=${HORIZONAUTH_REPO:-${GIT_BASE}/openstack/django_openstack_auth.git}
 HORIZONAUTH_BRANCH=${HORIZONAUTH_BRANCH:-master}
 
-# baremetal provisionint service
+# baremetal provisioning service
 IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git}
 IRONIC_BRANCH=${IRONIC_BRANCH:-master}
+IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironic-python-agent.git}
+IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-master}
 
 # ironic client
 IRONICCLIENT_REPO=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
@@ -176,14 +205,30 @@
 NOVACLIENT_REPO=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
 NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master}
 
+# os-apply-config configuration template tool
+OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
+OAC_BRANCH=${OAC_BRANCH:-master}
+
+# os-collect-config configuration agent
+OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git}
+OCC_BRANCH=${OCC_BRANCH:-master}
+
 # consolidated openstack python client
 OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
 OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master}
 
+# os-refresh-config configuration run-parts tool
+ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
+ORC_BRANCH=${ORC_BRANCH:-master}
+
 # cliff command line framework
 CLIFF_REPO=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
 CLIFF_BRANCH=${CLIFF_BRANCH:-master}
 
+# oslo.concurrency
+OSLOCON_REPO=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git}
+OSLOCON_BRANCH=${OSLOCON_BRANCH:-master}
+
 # oslo.config
 OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
 OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master}
@@ -196,14 +241,30 @@
 OSLOI18N_REPO=${OSLOI18N_REPO:-${GIT_BASE}/openstack/oslo.i18n.git}
 OSLOI18N_BRANCH=${OSLOI18N_BRANCH:-master}
 
+# oslo.log
+OSLOLOG_REPO=${OSLOLOG_REPO:-${GIT_BASE}/openstack/oslo.log.git}
+OSLOLOG_BRANCH=${OSLOLOG_BRANCH:-master}
+
 # oslo.messaging
 OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git}
 OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master}
 
+# oslo.middleware
+OSLOMID_REPO=${OSLOMID_REPO:-${GIT_BASE}/openstack/oslo.middleware.git}
+OSLOMID_BRANCH=${OSLOMID_BRANCH:-master}
+
 # oslo.rootwrap
 OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
 OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master}
 
+# oslo.serialization
+OSLOSERIALIZATION_REPO=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git}
+OSLOSERIALIZATION_BRANCH=${OSLOSERIALIZATION_BRANCH:-master}
+
+# oslo.utils
+OSLOUTILS_REPO=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git}
+OSLOUTILS_BRANCH=${OSLOUTILS_BRANCH:-master}
+
 # oslo.vmware
 OSLOVMWARE_REPO=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
 OSLOVMWARE_BRANCH=${OSLOVMWARE_BRANCH:-master}
@@ -250,10 +311,12 @@
 TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
 TEMPEST_BRANCH=${TEMPEST_BRANCH:-master}
 
+TEMPEST_LIB_REPO=${TEMPEST_LIB_REPO:-${GIT_BASE}/openstack/tempest-lib.git}
+TEMPEST_LIB_BRANCH=${TEMPEST_LIB_BRANCH:-master}
 
-# diskimage-builder
-DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
-DIB_BRANCH=${DIB_BRANCH:-master}
+# Tripleo elements for diskimage-builder images
+TIE_REPO=${TIE_REPO:-${GIT_BASE}/openstack/tripleo-image-elements.git}
+TIE_BRANCH=${TIE_BRANCH:-master}
 
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
@@ -284,6 +347,10 @@
 PECAN_REPO=${PECAN_REPO:-${GIT_BASE}/stackforge/pecan.git}
 PECAN_BRANCH=${PECAN_BRANCH:-master}
 
+# sqlalchemy-migrate
+SQLALCHEMY_MIGRATE_REPO=${SQLALCHEMY_MIGRATE_REPO:-${GIT_BASE}/stackforge/sqlalchemy-migrate.git}
+SQLALCHEMY_MIGRATE_BRANCH=${SQLALCHEMY_MIGRATE_BRANCH:-master}
+
 
 # Nova hypervisor configuration.  We default to libvirt with **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
@@ -332,14 +399,15 @@
 #    glance as a disk image.  If it ends in .gz, it is uncompressed first.
 #    example:
 #      http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img
-#      http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-rootfs.img.gz
+#      http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz
 #  * OpenVZ image:
 #    OpenVZ uses its own format of image, and does not support UEC style images
 
 #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
-#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img" # cirros full disk image
+#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
 
 CIRROS_VERSION=${CIRROS_VERSION:-"0.3.2"}
+CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
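With these defaults, the UEC tarball URL used below expands to, for example:

    http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-uec.tar.gz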
 
 # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
 # which may be set in ``localrc``.  Also allow ``DEFAULT_IMAGE_NAME`` and
@@ -351,11 +419,11 @@
     libvirt)
         case "$LIBVIRT_TYPE" in
             lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
-                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-rootfs}
-                IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-rootfs.img.gz"};;
+                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs}
+                IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz"};;
             *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
-                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec}
-                IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"};;
+                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+                IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};;
         esac
         ;;
     vsphere)
@@ -363,10 +431,20 @@
         IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk"};;
     xenserver)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk}
-        IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};;
+        IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"}
+        IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";;
+    ironic)
+        # Ironic can do both partition and full disk images, depending on the driver
+        if [[ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]]; then
+            DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-disk}
+        else
+            DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec}
+        fi
+        IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"}
+        IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img";;
     *) # Default to Cirros with kernel, ramdisk and disk image
-        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec}
-        IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"};;
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+        IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};;
 esac
 
 # Use 64bit fedora image if heat is enabled
@@ -384,7 +462,7 @@
 # Trove needs a custom image for its work
 if [[ "$ENABLED_SERVICES" =~ 'tr-api' ]]; then
     case "$VIRT_DRIVER" in
-        libvirt|baremetal|ironic)
+        libvirt|baremetal|ironic|xenapi)
             TROVE_GUEST_IMAGE_URL=${TROVE_GUEST_IMAGE_URL:-"http://tarballs.openstack.org/trove/images/ubuntu_mysql.qcow2/ubuntu_mysql.qcow2"}
             IMAGE_URLS+=",${TROVE_GUEST_IMAGE_URL}"
             ;;
@@ -430,6 +508,15 @@
 # Undo requirements changes by global requirements
 UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True}
 
+# Allow the use of an alternate protocol (such as https) for service endpoints
+SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
+
+# Sets the maximum number of workers for most services to reduce
+# the memory used where there are a large number of CPUs present
+# (the default number of workers for many services is the number of CPUs).
+# It also sets the minimum number of workers to 2.
+API_WORKERS=${API_WORKERS:=$(( ($(nproc)/2)<2 ? 2 : ($(nproc)/2) ))}
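Worked example: on an 8-CPU host ``$(nproc)/2`` is 4, so ``API_WORKERS`` defaults to 4; on a 1- or 2-CPU host the quotient falls below 2, so the floor of 2 applies.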
+
 # Local variables:
 # mode: shell-script
 # End:
diff --git a/tests/fake-service.sh b/tests/fake-service.sh
new file mode 100755
index 0000000..d4b9b56
--- /dev/null
+++ b/tests/fake-service.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# fake-service.sh - a fake service for start/stop testing
+# $1 - sleep time
+
+SLEEP_TIME=${1:-3}
+
+LOG=/tmp/fake-service.log
+TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
+
+# duplicate output
+exec 1> >(tee -a ${LOG})
+
+echo ""
+echo "Starting fake-service for ${SLEEP_TIME}"
+while true; do
+    echo "$(date +${TIMESTAMP_FORMAT}) [$$]"
+    sleep ${SLEEP_TIME}
+done
+
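Usage sketch: ``./tests/fake-service.sh 5`` appends a timestamped ``$$`` (PID) line to ``/tmp/fake-service.log`` every 5 seconds until the process is killed.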
diff --git a/tests/run-process.sh b/tests/run-process.sh
new file mode 100755
index 0000000..bdf1395
--- /dev/null
+++ b/tests/run-process.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+# tests/run-process.sh - Test DevStack run_process() and stop_process()
+#
+# run-process.sh start|stop|status
+#
+# Set USE_SCREEN True|False to change use of screen.
+#
+# This script emulates the basic exec environment in ``stack.sh`` to test
+# the process spawn and kill operations.
+
+if [[ -z $1 ]]; then
+    echo "$0 start|stop"
+    exit 1
+fi
+
+TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+source $TOP_DIR/functions
+
+USE_SCREEN=${USE_SCREEN:-False}
+
+ENABLED_SERVICES=fake-service
+
+SERVICE_DIR=/tmp
+SCREEN_NAME=test
+SCREEN_LOGDIR=${SERVICE_DIR}/${SCREEN_NAME}
+
+
+# Kill background processes on exit
+trap clean EXIT
+clean() {
+    local r=$?
+    jobs -p
+    kill >/dev/null 2>&1 $(jobs -p)
+    exit $r
+}
+
+
+# Exit on any errors so that errors don't compound
+trap failed ERR
+failed() {
+    local r=$?
+    jobs -p
+    kill >/dev/null 2>&1 $(jobs -p)
+    set +o xtrace
+    [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
+    exit $r
+}
+
+function status {
+    if [[ -r $SERVICE_DIR/$SCREEN_NAME/fake-service.pid ]]; then
+        pstree -pg $(cat $SERVICE_DIR/$SCREEN_NAME/fake-service.pid)
+    fi
+    ps -ef | grep fake
+}
+
+function setup_screen {
+    if [[ ! -d $SERVICE_DIR/$SCREEN_NAME ]]; then
+        rm -rf $SERVICE_DIR/$SCREEN_NAME
+        mkdir -p $SERVICE_DIR/$SCREEN_NAME
+    fi
+
+    if [[ "$USE_SCREEN" == "True" ]]; then
+        # Create a new named screen to run processes in
+        screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
+        sleep 1
+
+        # Set a reasonable status bar
+        if [ -z "$SCREEN_HARDSTATUS" ]; then
+            SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
+        fi
+        screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
+    fi
+
+    # Clear screen rc file
+    SCREENRC=$TOP_DIR/tests/$SCREEN_NAME-screenrc
+    if [[ -e $SCREENRC ]]; then
+        echo -n > $SCREENRC
+    fi
+}
+
+# Mimic logging: set up output redirection without log files
+# Copy stdout to fd 3
+exec 3>&1
+if [[ "$VERBOSE" != "True" ]]; then
+    # Throw away stdout and stderr
+    #exec 1>/dev/null 2>&1
+    :
+fi
+# Always send summary fd to original stdout
+exec 6>&3
+
+
+if [[ "$1" == "start" ]]; then
+    echo "Start service"
+    setup_screen
+    run_process fake-service "$TOP_DIR/tests/fake-service.sh"
+    sleep 1
+    status
+elif [[ "$1" == "stop" ]]; then
+    echo "Stop service"
+    stop_process fake-service
+    status
+elif [[ "$1" == "status" ]]; then
+    status
+else
+    echo "Unknown command"
+    exit 1
+fi
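A typical manual test run against these helpers (sketch, from the devstack checkout root):

    USE_SCREEN=False ./tests/run-process.sh start
    ./tests/run-process.sh status
    ./tests/run-process.sh stop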
diff --git a/tools/build_bm.sh b/tools/build_bm.sh
deleted file mode 100755
index ab0ba0e..0000000
--- a/tools/build_bm.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env bash
-
-# **build_bm.sh**
-
-# Build an OpenStack install on a bare metal machine.
-set +x
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Source params
-source ./stackrc
-
-# Param string to pass to stack.sh.  Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova"
-STACKSH_PARAMS=${STACKSH_PARAMS:-}
-
-# Option to use the version of devstack on which we are currently working
-USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
-
-# Configure the runner
-RUN_SH=`mktemp`
-cat > $RUN_SH <<EOF
-#!/usr/bin/env bash
-# Install and run stack.sh
-cd devstack
-$STACKSH_PARAMS ./stack.sh
-EOF
-
-# Make the run.sh executable
-chmod 755 $RUN_SH
-
-scp -r . root@$CONTAINER_IP:devstack
-scp $RUN_SH root@$CONTAINER_IP:$RUN_SH
-ssh root@$CONTAINER_IP $RUN_SH
diff --git a/tools/build_bm_multi.sh b/tools/build_bm_multi.sh
deleted file mode 100755
index 328d576..0000000
--- a/tools/build_bm_multi.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env bash
-
-# **build_bm_multi.sh**
-
-# Build an OpenStack install on several bare metal machines.
-SHELL_AFTER_RUN=no
-
-# Variables common amongst all hosts in the cluster
-COMMON_VARS="MYSQL_HOST=$HEAD_HOST RABBIT_HOST=$HEAD_HOST GLANCE_HOSTPORT=$HEAD_HOST:9292 NETWORK_MANAGER=FlatDHCPManager FLAT_INTERFACE=eth0 FLOATING_RANGE=$FLOATING_RANGE MULTI_HOST=1 SHELL_AFTER_RUN=$SHELL_AFTER_RUN"
-
-# Helper to launch containers
-function run_bm {
-    # For some reason container names with periods can cause issues :/
-    CONTAINER=$1 CONTAINER_IP=$2 CONTAINER_NETMASK=$NETMASK CONTAINER_GATEWAY=$GATEWAY NAMESERVER=$NAMESERVER TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $3" ./tools/build_bm.sh
-}
-
-# Launch the head node - headnode uses a non-ip domain name,
-# because rabbit won't launch with an ip addr hostname :(
-run_bm STACKMASTER $HEAD_HOST "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"
-
-# Wait till the head node is up
-if [ ! "$TERMINATE" = "1" ]; then
-    echo "Waiting for head node ($HEAD_HOST) to start..."
-    if ! timeout 60 sh -c "while ! wget -q -O- http://$HEAD_HOST | grep -q username; do sleep 1; done"; then
-        echo "Head node did not start"
-        exit 1
-    fi
-fi
-
-PIDS=""
-# Launch the compute hosts in parallel
-for compute_host in ${COMPUTE_HOSTS//,/ }; do
-    run_bm $compute_host $compute_host "ENABLED_SERVICES=n-cpu,n-net,n-api" &
-    PIDS="$PIDS $!"
-done
-
-for x in $PIDS; do
-    wait $x
-done
-echo "build_bm_multi complete"
diff --git a/tools/build_pxe_env.sh b/tools/build_pxe_env.sh
deleted file mode 100755
index 50d91d0..0000000
--- a/tools/build_pxe_env.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/bash -e
-
-# **build_pxe_env.sh**
-
-# Create a PXE boot environment
-#
-# build_pxe_env.sh destdir
-#
-# Requires Ubuntu Oneiric
-#
-# Only needs to run as root if the destdir permissions require it
-
-dpkg -l syslinux || apt-get install -y syslinux
-
-DEST_DIR=${1:-/tmp}/tftpboot
-PXEDIR=${PXEDIR:-/opt/ramstack/pxe}
-PROGDIR=`dirname $0`
-
-# Clean up any resources that may be in use
-function cleanup {
-    set +o errexit
-
-    # Mop up temporary files
-    if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
-        umount $MNTDIR
-        rmdir $MNTDIR
-    fi
-
-    # Kill ourselves to signal any calling process
-    trap 2; kill -2 $$
-}
-
-trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=`cd $TOOLS_DIR/..; pwd`
-
-mkdir -p $DEST_DIR/pxelinux.cfg
-cd $DEST_DIR
-for i in memdisk menu.c32 pxelinux.0; do
-    cp -pu /usr/lib/syslinux/$i $DEST_DIR
-done
-
-CFG=$DEST_DIR/pxelinux.cfg/default
-cat >$CFG <<EOF
-default menu.c32
-prompt 0
-timeout 0
-
-MENU TITLE devstack PXE Boot Menu
-
-EOF
-
-# Setup devstack boot
-mkdir -p $DEST_DIR/ubuntu
-if [ ! -d $PXEDIR ]; then
-    mkdir -p $PXEDIR
-fi
-
-# Get image into place
-if [ ! -r $PXEDIR/stack-initrd.img ]; then
-    cd $TOP_DIR
-    $PROGDIR/build_ramdisk.sh $PXEDIR/stack-initrd.img
-fi
-if [ ! -r $PXEDIR/stack-initrd.gz ]; then
-    gzip -1 -c $PXEDIR/stack-initrd.img >$PXEDIR/stack-initrd.gz
-fi
-cp -pu $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu
-
-if [ ! -r $PXEDIR/vmlinuz-*-generic ]; then
-    MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX`
-    mount -t ext4 -o loop $PXEDIR/stack-initrd.img $MNTDIR
-
-    if [ ! -r $MNTDIR/boot/vmlinuz-*-generic ]; then
-        echo "No kernel found"
-        umount $MNTDIR
-        rmdir $MNTDIR
-        exit 1
-    else
-        cp -pu $MNTDIR/boot/vmlinuz-*-generic $PXEDIR
-    fi
-    umount $MNTDIR
-    rmdir $MNTDIR
-fi
-
-# Get generic kernel version
-KNAME=`basename $PXEDIR/vmlinuz-*-generic`
-KVER=${KNAME#vmlinuz-}
-cp -pu $PXEDIR/vmlinuz-$KVER $DEST_DIR/ubuntu
-cat >>$CFG <<EOF
-
-LABEL devstack
-    MENU LABEL ^devstack
-    MENU DEFAULT
-    KERNEL ubuntu/vmlinuz-$KVER
-    APPEND initrd=ubuntu/stack-initrd.gz ramdisk_size=2109600 root=/dev/ram0
-EOF
-
-# Get Ubuntu
-if [ -d $PXEDIR -a -r $PXEDIR/natty-base-initrd.gz ]; then
-    cp -pu $PXEDIR/natty-base-initrd.gz $DEST_DIR/ubuntu
-    cat >>$CFG <<EOF
-
-LABEL ubuntu
-    MENU LABEL ^Ubuntu Natty
-    KERNEL ubuntu/vmlinuz-$KVER
-    APPEND initrd=ubuntu/natty-base-initrd.gz ramdisk_size=419600 root=/dev/ram0
-EOF
-fi
-
-# Local disk boot
-cat >>$CFG <<EOF
-
-LABEL local
-    MENU LABEL ^Local disk
-    LOCALBOOT 0
-EOF
-
-trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT
diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh
deleted file mode 100755
index 50ba8ef..0000000
--- a/tools/build_ramdisk.sh
+++ /dev/null
@@ -1,230 +0,0 @@
-#!/bin/bash
-
-# **build_ramdisk.sh**
-
-# Build RAM disk images
-
-# Exit on error to stop unexpected errors
-set -o errexit
-
-if [ ! "$#" -eq "1" ]; then
-    echo "$0 builds a gziped Ubuntu OpenStack install"
-    echo "usage: $0 dest"
-    exit 1
-fi
-
-# Clean up any resources that may be in use
-function cleanup {
-    set +o errexit
-
-    # Mop up temporary files
-    if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
-        umount $MNTDIR
-        rmdir $MNTDIR
-    fi
-    if [ -n "$DEV_FILE_TMP" -a -e "$DEV_FILE_TMP" ]; then
-        rm -f $DEV_FILE_TMP
-    fi
-    if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then
-        rm -f $IMG_FILE_TMP
-    fi
-
-    # Release NBD devices
-    if [ -n "$NBD" ]; then
-        qemu-nbd -d $NBD
-    fi
-
-    # Kill ourselves to signal any calling process
-    trap 2; kill -2 $$
-}
-
-trap cleanup SIGHUP SIGINT SIGTERM
-
-# Set up nbd
-modprobe nbd max_part=63
-
-# Echo commands
-set -o xtrace
-
-IMG_FILE=$1
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
-
-# Import common functions
-. $TOP_DIR/functions
-
-# Store cwd
-CWD=`pwd`
-
-cd $TOP_DIR
-
-# Source params
-source ./stackrc
-
-CACHEDIR=${CACHEDIR:-/opt/stack/cache}
-
-DEST=${DEST:-/opt/stack}
-
-# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
-ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
-
-# Base image (natty by default)
-DIST_NAME=${DIST_NAME:-natty}
-
-# Param string to pass to stack.sh.  Like "EC2_DMZ_HOST=192.168.1.1 MYSQL_USER=nova"
-STACKSH_PARAMS=${STACKSH_PARAMS:-}
-
-# Option to use the version of devstack on which we are currently working
-USE_CURRENT_DEVSTACK=${USE_CURRENT_DEVSTACK:-1}
-
-# clean install
-if [ ! -r $CACHEDIR/$DIST_NAME-base.img ]; then
-    $TOOLS_DIR/get_uec_image.sh $DIST_NAME $CACHEDIR/$DIST_NAME-base.img
-fi
-
-# Finds and returns full device path for the next available NBD device.
-# Exits script if error connecting or none free.
-# map_nbd image
-function map_nbd {
-    for i in `seq 0 15`; do
-        if [ ! -e /sys/block/nbd$i/pid ]; then
-            NBD=/dev/nbd$i
-            # Connect to nbd and wait till it is ready
-            qemu-nbd -c $NBD $1
-            if ! timeout 60 sh -c "while ! [ -e ${NBD}p1 ]; do sleep 1; done"; then
-                echo "Couldn't connect $NBD"
-                exit 1
-            fi
-            break
-        fi
-    done
-    if [ -z "$NBD" ]; then
-        echo "No free NBD slots"
-        exit 1
-    fi
-    echo $NBD
-}
-
-# Prime the image with as many apt packages as we can
-DEV_FILE=$CACHEDIR/$DIST_NAME-dev.img
-DEV_FILE_TMP=`mktemp $DEV_FILE.XXXXXX`
-if [ ! -r $DEV_FILE ]; then
-    cp -p $CACHEDIR/$DIST_NAME-base.img $DEV_FILE_TMP
-
-    NBD=`map_nbd $DEV_FILE_TMP`
-    MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX`
-    mount -t ext4 ${NBD}p1 $MNTDIR
-    cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf
-
-    chroot $MNTDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
-    chroot $MNTDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1`
-
-    # Create a stack user that is a member of the libvirtd group so that stack
-    # is able to interact with libvirt.
-    chroot $MNTDIR groupadd libvirtd
-    chroot $MNTDIR useradd $STACK_USER -s /bin/bash -d $DEST -G libvirtd
-    mkdir -p $MNTDIR/$DEST
-    chroot $MNTDIR chown $STACK_USER $DEST
-
-    # A simple password - pass
-    echo $STACK_USER:pass | chroot $MNTDIR chpasswd
-    echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd
-
-    # And has sudo ability (in the future this should be limited to only what
-    # stack requires)
-    echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers
-
-    umount $MNTDIR
-    rmdir $MNTDIR
-    qemu-nbd -d $NBD
-    NBD=""
-    mv $DEV_FILE_TMP $DEV_FILE
-fi
-rm -f $DEV_FILE_TMP
-
-
-# Clone git repositories onto the system
-# ======================================
-
-IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX`
-
-if [ ! -r $IMG_FILE ]; then
-    NBD=`map_nbd $DEV_FILE`
-
-    # Pre-create the image file
-    # FIXME(dt): This should really get the partition size to
-    # pre-create the image file
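-    # (bs=1 count=1 with a 2 GiB seek writes a single byte at that offset,
-    # producing a sparse file of the target size)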
-    dd if=/dev/zero of=$IMG_FILE_TMP bs=1 count=1 seek=$((2*1024*1024*1024))
-    # Create filesystem image for RAM disk
-    dd if=${NBD}p1 of=$IMG_FILE_TMP bs=1M
-
-    qemu-nbd -d $NBD
-    NBD=""
-    mv $IMG_FILE_TMP $IMG_FILE
-fi
-rm -f $IMG_FILE_TMP
-
-MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX`
-mount -t ext4 -o loop $IMG_FILE $MNTDIR
-cp -p /etc/resolv.conf $MNTDIR/etc/resolv.conf
-
-# We need to install a non-virtual kernel and modules to boot from
-if [ ! -r "`ls $MNTDIR/boot/vmlinuz-*-generic | head -1`" ]; then
-    chroot $MNTDIR apt-get install -y linux-generic
-fi
-
-git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH
-git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH
-git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH
-git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH
-git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH
-git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH
-git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH
-
-# Use this version of devstack
-rm -rf $MNTDIR/$DEST/devstack
-cp -pr $CWD $MNTDIR/$DEST/devstack
-chroot $MNTDIR chown -R $STACK_USER $DEST/devstack
-
-# Configure host network for DHCP
-mkdir -p $MNTDIR/etc/network
-cat > $MNTDIR/etc/network/interfaces <<EOF
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet dhcp
-EOF
-
-# Set hostname
-echo "ramstack" >$MNTDIR/etc/hostname
-echo "127.0.0.1		localhost	ramstack" >$MNTDIR/etc/hosts
-
-# Configure the runner
-RUN_SH=$MNTDIR/$DEST/run.sh
-cat > $RUN_SH <<EOF
-#!/usr/bin/env bash
-
-# Get IP range
-set \`ip addr show dev eth0 | grep inet\`
-PREFIX=\`echo \$2 | cut -d. -f1,2,3\`
-export FLOATING_RANGE="\$PREFIX.224/27"
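-# e.g. an eth0 address of 10.0.0.5 gives FLOATING_RANGE=10.0.0.224/27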
-
-# Kill any existing screens
-killall screen
-
-# Run stack.sh
-cd $DEST/devstack && \$STACKSH_PARAMS ./stack.sh > $DEST/run.sh.log
-echo >> $DEST/run.sh.log
-echo >> $DEST/run.sh.log
-echo "All done! Time to start clicking." >> $DEST/run.sh.log
-EOF
-
-# Make the run.sh executable
-chmod 755 $RUN_SH
-chroot $MNTDIR chown $STACK_USER $DEST/run.sh
-
-umount $MNTDIR
-rmdir $MNTDIR
diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh
deleted file mode 100755
index 5f3acc5..0000000
--- a/tools/build_uec_ramdisk.sh
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/env bash
-
-# **build_uec_ramdisk.sh**
-
-# Build RAM disk images based on UEC image
-
-# Exit on error to stop unexpected errors
-set -o errexit
-
-if [ ! "$#" -eq "1" ]; then
-    echo "$0 builds a gziped Ubuntu OpenStack install"
-    echo "usage: $0 dest"
-    exit 1
-fi
-
-# Make sure that we have the proper version of ubuntu (only works on oneiric)
-if ! egrep -q "oneiric" /etc/lsb-release; then
-    echo "This script only works with ubuntu oneiric."
-    exit 1
-fi
-
-# Clean up resources that may be in use
-function cleanup {
-    set +o errexit
-
-    if [ -n "$MNT_DIR" ]; then
-        umount $MNT_DIR/dev
-        umount $MNT_DIR
-    fi
-
-    if [ -n "$DEST_FILE_TMP" ]; then
-        rm $DEST_FILE_TMP
-    fi
-
-    # Kill ourselves to signal parents
-    trap 2; kill -2 $$
-}
-
-trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT
-
-# Output dest image
-DEST_FILE=$1
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
-
-# Import common functions
-. $TOP_DIR/functions
-
-cd $TOP_DIR
-
-# Source params
-source ./stackrc
-
-DEST=${DEST:-/opt/stack}
-
-# Ubuntu distro to install
-DIST_NAME=${DIST_NAME:-oneiric}
-
-# Configure how large the VM should be
-GUEST_SIZE=${GUEST_SIZE:-2G}
-
-# Exit on error to stop unexpected errors
-set -o errexit
-set -o xtrace
-
-# Abort if localrc is not set
-if [ ! -e $TOP_DIR/localrc ]; then
-    echo "You must have a localrc with ALL necessary passwords defined before proceeding."
-    echo "See stack.sh for required passwords."
-    exit 1
-fi
-
-# Install deps if needed
-DEPS="kvm libvirt-bin kpartx cloud-utils curl"
-apt_get install -y --force-yes $DEPS
-
-# Where to store files and instances
-CACHEDIR=${CACHEDIR:-/opt/stack/cache}
-WORK_DIR=${WORK_DIR:-/opt/ramstack}
-
-# Where to store images
-image_dir=$WORK_DIR/images/$DIST_NAME
-mkdir -p $image_dir
-
-# Get the base image if it does not yet exist
-if [ ! -e $image_dir/disk ]; then
-    $TOOLS_DIR/get_uec_image.sh -r 2000M $DIST_NAME $image_dir/disk
-fi
-
-# Configure the root password of the vm to be the same as ``ADMIN_PASSWORD``
-ROOT_PASSWORD=${ADMIN_PASSWORD:-password}
-
-# Name of our instance, used by libvirt
-GUEST_NAME=${GUEST_NAME:-devstack}
-
-# Pre-load the image with basic environment
-if [ ! -e $image_dir/disk-primed ]; then
-    cp $image_dir/disk $image_dir/disk-primed
-    $TOOLS_DIR/warm_apts_for_uec.sh $image_dir/disk-primed
-    $TOOLS_DIR/copy_dev_environment_to_uec.sh $image_dir/disk-primed
-fi
-
-# Back to devstack
-cd $TOP_DIR
-
-DEST_FILE_TMP=`mktemp $DEST_FILE.XXXXXX`
-MNT_DIR=`mktemp -d --tmpdir mntXXXXXXXX`
-cp $image_dir/disk-primed $DEST_FILE_TMP
-mount -t ext4 -o loop $DEST_FILE_TMP $MNT_DIR
-mount -o bind /dev /$MNT_DIR/dev
-cp -p /etc/resolv.conf $MNT_DIR/etc/resolv.conf
-echo root:$ROOT_PASSWORD | chroot $MNT_DIR chpasswd
-touch $MNT_DIR/$DEST/.ramdisk
-
-# We need to install a non-virtual kernel and modules to boot from
-if [ ! -r "`ls $MNT_DIR/boot/vmlinuz-*-generic | head -1`" ]; then
-    chroot $MNT_DIR apt-get install -y linux-generic
-fi
-
-git_clone $NOVA_REPO $DEST/nova $NOVA_BRANCH
-git_clone $GLANCE_REPO $DEST/glance $GLANCE_BRANCH
-git_clone $KEYSTONE_REPO $DEST/keystone $KEYSTONE_BRANCH
-git_clone $NOVNC_REPO $DEST/novnc $NOVNC_BRANCH
-git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH
-git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH
-git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH
-git_clone $TEMPEST_REPO $DEST/tempest $TEMPEST_BRANCH
-
-# Use this version of devstack
-rm -rf $MNT_DIR/$DEST/devstack
-cp -pr $TOP_DIR $MNT_DIR/$DEST/devstack
-chroot $MNT_DIR chown -R stack $DEST/devstack
-
-# Configure host network for DHCP
-mkdir -p $MNT_DIR/etc/network
-cat > $MNT_DIR/etc/network/interfaces <<EOF
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet dhcp
-EOF
-
-# Set hostname
-echo "ramstack" >$MNT_DIR/etc/hostname
-echo "127.0.0.1		localhost	ramstack" >$MNT_DIR/etc/hosts
-
-# Configure the runner
-RUN_SH=$MNT_DIR/$DEST/run.sh
-cat > $RUN_SH <<EOF
-#!/usr/bin/env bash
-
-# Get IP range
-set \`ip addr show dev eth0 | grep inet\`
-PREFIX=\`echo \$2 | cut -d. -f1,2,3\`
-export FLOATING_RANGE="\$PREFIX.224/27"
-
-# Kill any existing screens
-killall screen
-
-# Run stack.sh
-cd $DEST/devstack && \$STACKSH_PARAMS ./stack.sh > $DEST/run.sh.log
-echo >> $DEST/run.sh.log
-echo >> $DEST/run.sh.log
-echo "All done! Time to start clicking." >> $DEST/run.sh.log
-EOF
-
-# Make the run.sh executable
-chmod 755 $RUN_SH
-chroot $MNT_DIR chown stack $DEST/run.sh
-
-umount $MNT_DIR/dev
-umount $MNT_DIR
-rmdir $MNT_DIR
-mv $DEST_FILE_TMP $DEST_FILE
-rm -f $DEST_FILE_TMP
-
-trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT
diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh
deleted file mode 100755
index c97e0a1..0000000
--- a/tools/build_usb_boot.sh
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/bin/bash -e
-
-# **build_usb_boot.sh**
-
-# Create a syslinux boot environment
-#
-# build_usb_boot.sh destdev
-#
-# Assumes syslinux is installed
-# Needs to run as root
-
-DEST_DIR=${1:-/tmp/syslinux-boot}
-PXEDIR=${PXEDIR:-/opt/ramstack/pxe}
-
-# Clean up any resources that may be in use
-function cleanup {
-    set +o errexit
-
-    # Mop up temporary files
-    if [ -n "$DEST_DEV" ]; then
-        umount $DEST_DIR
-        rmdir $DEST_DIR
-    fi
-    if [ -n "$MNTDIR" -a -d "$MNTDIR" ]; then
-        umount $MNTDIR
-        rmdir $MNTDIR
-    fi
-
-    # Kill ourselves to signal any calling process
-    trap 2; kill -2 $$
-}
-
-trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=`cd $TOOLS_DIR/..; pwd`
-
-if [ -b $DEST_DIR ]; then
-    # We have a block device, install syslinux and mount it
-    DEST_DEV=$DEST_DIR
-    DEST_DIR=`mktemp -d --tmpdir mntXXXXXX`
-    mount $DEST_DEV $DEST_DIR
-
-    if [ ! -d $DEST_DIR/syslinux ]; then
-        mkdir -p $DEST_DIR/syslinux
-    fi
-
-    # Install syslinux on the device
-    syslinux --install --directory syslinux $DEST_DEV
-else
-    # We have a directory (for sanity checking output)
-    DEST_DEV=""
-    if [ ! -d $DEST_DIR/syslinux ]; then
-        mkdir -p $DEST_DIR/syslinux
-    fi
-fi
-
-# Get some more stuff from syslinux
-for i in memdisk menu.c32; do
-    cp -pu /usr/lib/syslinux/$i $DEST_DIR/syslinux
-done
-
-CFG=$DEST_DIR/syslinux/syslinux.cfg
-cat >$CFG <<EOF
-default /syslinux/menu.c32
-prompt 0
-timeout 0
-
-MENU TITLE devstack Boot Menu
-
-EOF
-
-# Setup devstack boot
-mkdir -p $DEST_DIR/ubuntu
-if [ ! -d $PXEDIR ]; then
-    mkdir -p $PXEDIR
-fi
-
-# Get image into place
-if [ ! -r $PXEDIR/stack-initrd.img ]; then
-    cd $TOP_DIR
-    $TOOLS_DIR/build_uec_ramdisk.sh $PXEDIR/stack-initrd.img
-fi
-if [ ! -r $PXEDIR/stack-initrd.gz ]; then
-    gzip -1 -c $PXEDIR/stack-initrd.img >$PXEDIR/stack-initrd.gz
-fi
-cp -pu $PXEDIR/stack-initrd.gz $DEST_DIR/ubuntu
-
-if [ ! -r $PXEDIR/vmlinuz-*-generic ]; then
-    MNTDIR=`mktemp -d --tmpdir mntXXXXXXXX`
-    mount -t ext4 -o loop $PXEDIR/stack-initrd.img $MNTDIR
-
-    if [ ! -r $MNTDIR/boot/vmlinuz-*-generic ]; then
-        echo "No kernel found"
-        umount $MNTDIR
-        rmdir $MNTDIR
-        if [ -n "$DEST_DEV" ]; then
-            umount $DEST_DIR
-            rmdir $DEST_DIR
-        fi
-        exit 1
-    else
-        cp -pu $MNTDIR/boot/vmlinuz-*-generic $PXEDIR
-    fi
-    umount $MNTDIR
-    rmdir $MNTDIR
-fi
-
-# Get generic kernel version
-KNAME=`basename $PXEDIR/vmlinuz-*-generic`
-KVER=${KNAME#vmlinuz-}
-cp -pu $PXEDIR/vmlinuz-$KVER $DEST_DIR/ubuntu
-cat >>$CFG <<EOF
-
-LABEL devstack
-    MENU LABEL ^devstack
-    MENU DEFAULT
-    KERNEL /ubuntu/vmlinuz-$KVER
-    APPEND initrd=/ubuntu/stack-initrd.gz ramdisk_size=2109600 root=/dev/ram0
-EOF
-
-# Get Ubuntu
-if [ -d $PXEDIR -a -r $PXEDIR/natty-base-initrd.gz ]; then
-    cp -pu $PXEDIR/natty-base-initrd.gz $DEST_DIR/ubuntu
-    cat >>$CFG <<EOF
-
-LABEL ubuntu
-    MENU LABEL ^Ubuntu Natty
-    KERNEL /ubuntu/vmlinuz-$KVER
-    APPEND initrd=/ubuntu/natty-base-initrd.gz ramdisk_size=419600 root=/dev/ram0
-EOF
-fi
-
-# Local disk boot
-cat >>$CFG <<EOF
-
-LABEL local
-    MENU LABEL ^Local disk
-    LOCALBOOT 0
-EOF
-
-if [ -n "$DEST_DEV" ]; then
-    umount $DEST_DIR
-    rmdir $DEST_DIR
-fi
-
-trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT
diff --git a/tools/copy_dev_environment_to_uec.sh b/tools/copy_dev_environment_to_uec.sh
deleted file mode 100755
index 94a4926..0000000
--- a/tools/copy_dev_environment_to_uec.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env bash
-
-# **copy_dev_environment_to_uec.sh**
-
-# Echo commands
-set -o xtrace
-
-# Exit on error to stop unexpected errors
-set -o errexit
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
-
-# Import common functions
-. $TOP_DIR/functions
-
-# Change dir to top of devstack
-cd $TOP_DIR
-
-# Source params
-source ./stackrc
-
-# Echo usage
-function usage {
-    echo "Add stack user and keys"
-    echo ""
-    echo "Usage: $0 [full path to raw uec base image]"
-}
-
-# Make sure this is a raw image
-if ! qemu-img info $1 | grep -q "file format: raw"; then
-    usage
-    exit 1
-fi
-
-# Mount the image
-DEST=/opt/stack
-STAGING_DIR=/tmp/`echo $1 | sed  "s/\//_/g"`.stage.user
-mkdir -p $STAGING_DIR
-umount $STAGING_DIR || true
-sleep 1
-mount -t ext4 -o loop $1 $STAGING_DIR
-mkdir -p $STAGING_DIR/$DEST
-
-# Create a stack user that is a member of the libvirtd group so that stack
-# is able to interact with libvirt.
-chroot $STAGING_DIR groupadd libvirtd || true
-chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d $DEST -G libvirtd || true
-
-# Add a simple password - pass
-echo $STACK_USER:pass | chroot $STAGING_DIR chpasswd
-
-# Configure sudo
-( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \
-    > $STAGING_DIR/etc/sudoers.d/50_stack_sh )
-
-# Copy over your ssh keys and env if desired
-cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh
-cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys
-cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig
-cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc
-cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc
-
-# Copy devstack
-rm -rf $STAGING_DIR/$DEST/devstack
-cp_it . $STAGING_DIR/$DEST/devstack
-
-# Give stack ownership over $DEST so it may do the work needed
-chroot $STAGING_DIR chown -R $STACK_USER $DEST
-
-# Unmount
-umount $STAGING_DIR
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index 5b1111a..863fe03 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -37,6 +37,7 @@
 -C <tenant_name> create user and tenant, the specified tenant will be the user's tenant
 -r <name> when combined with -C and an existing (-u) user, sets the user's role in the (-C) tenant (default: Member)
 -p <userpass> password for the user
+--heat-url <heat_url>
 --os-username <username>
 --os-password <admin password>
 --os-tenant-name <tenant_name>
@@ -53,12 +54,13 @@
 EOF
 }
 
-if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@"); then
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,heat-url:,skip-tenant:,os-cacert:,help,debug -- "$@"); then
     display_help
     exit 1
 fi
 eval set -- $options
 ADDPASS=""
+HEAT_URL=""
 
 # The services users usually in the service tenant.
 # rc files for service users, is out of scope.
@@ -79,6 +81,7 @@
     --os-auth-url) export OS_AUTH_URL=$2; shift ;;
     --os-cacert) export OS_CACERT=$2; shift ;;
     --target-dir) ACCOUNT_DIR=$2; shift ;;
+    --heat-url) HEAT_URL=$2; shift ;;
     --debug) set -o xtrace ;;
     -u) MODE=${MODE:-one};  USER_NAME=$2; shift ;;
     -p) USER_PASS=$2; shift ;;
@@ -209,6 +212,10 @@
     if [ -n "$ADDPASS" ]; then
         echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile"
     fi
+    if [ -n "$HEAT_URL" ]; then
+        echo "export HEAT_URL=\"$HEAT_URL/$tenant_id\"" >>"$rcfile"
+        echo "export OS_NO_CLIENT_AUTH=True" >>"$rcfile"
+    fi
 }
 
 #admin users expected
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index f1dc76a..1732ecc 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -20,20 +20,24 @@
 #   - pre-install hgtools to work around a bug in RHEL6 distribute
 #   - install nose 1.1 from EPEL
 
-set -o errexit
-set -o xtrace
+# If TOP_DIR is set we're being sourced rather than running stand-alone
+# or in a sub-shell
+if [[ -z "$TOP_DIR" ]]; then
+    set -o errexit
+    set -o xtrace
 
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
+    # Keep track of the current directory
+    TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+    TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
 
-# Change dir to top of devstack
-cd $TOP_DIR
+    # Change dir to top of devstack
+    cd $TOP_DIR
 
-# Import common functions
-source $TOP_DIR/functions
+    # Import common functions
+    source $TOP_DIR/functions
 
-FILES=$TOP_DIR/files
+    FILES=$TOP_DIR/files
+fi
 
 # Keystone Port Reservation
 # -------------------------
@@ -93,30 +97,27 @@
     sudo chmod +r $dir/*
 fi
 
-# Ubuntu 12.04
-# ------------
-
-# We can regularly get kernel crashes on the 12.04 default kernel, so attempt
-# to install a new kernel
-if [[ ${DISTRO} =~ (precise) ]]; then
-    # Finally, because we suspect the Precise kernel is problematic, install a new kernel
-    UPGRADE_KERNEL=$(trueorfalse False $UPGRADE_KERNEL)
-    if [[ $UPGRADE_KERNEL == "True" ]]; then
-        if [[ ! `uname -r` =~ (^3\.11) ]]; then
-            apt_get install linux-generic-lts-saucy
-            echo "Installing Saucy LTS kernel, please reboot before proceeding"
-            exit 1
-        fi
-    fi
-fi
-
-
 if is_fedora; then
     # Disable selinux to avoid configuring to allow Apache access
     # to Horizon files (LP#1175444)
     if selinuxenabled; then
         sudo setenforce 0
     fi
+
+    FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD)
+    if [[ ${DISTRO} =~ (f19|f20) && $FORCE_FIREWALLD == "False" ]]; then
+        # On Fedora 19 and 20 firewalld interacts badly with libvirt and
+        # slows things down significantly.  However, for those cases
+        # where that combination is desired, allow this fix to be skipped.
+
+        # There was also an additional issue with firewalld hanging
+        # after install of libvirt with polkit.  See
+        # https://bugzilla.redhat.com/show_bug.cgi?id=1099031
+        if is_package_installed firewalld; then
+            uninstall_package firewalld
+        fi
+    fi
+
 fi
 
 # RHEL6
@@ -124,6 +125,14 @@
 
 if [[ $DISTRO =~ (rhel6) ]]; then
 
+    # install_pip.sh installs the latest setuptools over the packaged
+    # version.  We can't really uninstall the packaged version if it
+    # is there, because it may remove other important things like
+    # cloud-init.  Things work, but there can be an old egg file left
+    # around from the package that causes some really strange
+    # setuptools errors.  Remove it if it is there.
+    sudo rm -f /usr/lib/python2.6/site-packages/setuptools-0.6*.egg-info
+
     # If the ``dbus`` package was installed by DevStack dependencies the
     # uuid may not be generated because the service was never started (PR#598200),
     # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id``
diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh
deleted file mode 100755
index 225742c..0000000
--- a/tools/get_uec_image.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-
-# **get_uec_image.sh**
-
-# Download and prepare Ubuntu UEC images
-
-CACHEDIR=${CACHEDIR:-/opt/stack/cache}
-ROOTSIZE=${ROOTSIZE:-2000M}
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
-
-# Import common functions
-. $TOP_DIR/functions
-
-# Exit on error to stop unexpected errors
-set -o errexit
-set -o xtrace
-
-function usage {
-    echo "Usage: $0 - Download and prepare Ubuntu UEC images"
-    echo ""
-    echo "$0 [-r rootsize] release imagefile [kernel]"
-    echo ""
-    echo "-r size   - root fs size (min 2000MB)"
-    echo "release   - Ubuntu release: lucid - quantal"
-    echo "imagefile - output image file"
-    echo "kernel    - output kernel"
-    exit 1
-}
-
-# Clean up any resources that may be in use
-function cleanup {
-    set +o errexit
-
-    # Mop up temporary files
-    if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then
-        rm -f $IMG_FILE_TMP
-    fi
-
-    # Kill ourselves to signal any calling process
-    trap 2; kill -2 $$
-}
-
-while getopts hr: c; do
-    case $c in
-        h)  usage
-            ;;
-        r)  ROOTSIZE=$OPTARG
-            ;;
-    esac
-done
-shift `expr $OPTIND - 1`
-
-if [[ ! "$#" -eq "2" && ! "$#" -eq "3" ]]; then
-    usage
-fi
-
-# Default args
-DIST_NAME=$1
-IMG_FILE=$2
-IMG_FILE_TMP=`mktemp $IMG_FILE.XXXXXX`
-KERNEL=$3
-
-case $DIST_NAME in
-    saucy)      ;;
-    raring)     ;;
-    quantal)    ;;
-    precise)    ;;
-    *)          echo "Unknown release: $DIST_NAME"
-                usage
-                ;;
-esac
-
-trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT
-
-# Check dependencies
-if [ ! -x "`which qemu-img`" -o -z "`dpkg -l | grep cloud-utils`" ]; then
-    # Missing KVM?
-    apt_get install qemu-kvm cloud-utils
-fi
-
-# Find resize script
-RESIZE=`which resize-part-image || which uec-resize-image`
-if [ -z "$RESIZE" ]; then
-    echo "resize tool from cloud-utils not found"
-    exit 1
-fi
-
-# Get the UEC image
-UEC_NAME=$DIST_NAME-server-cloudimg-amd64
-if [ ! -d $CACHEDIR/$DIST_NAME ]; then
-    mkdir -p $CACHEDIR/$DIST_NAME
-fi
-if [ ! -e $CACHEDIR/$DIST_NAME/$UEC_NAME.tar.gz ]; then
-    (cd $CACHEDIR/$DIST_NAME && wget -N http://uec-images.ubuntu.com/$DIST_NAME/current/$UEC_NAME.tar.gz)
-    (cd $CACHEDIR/$DIST_NAME && tar Sxvzf $UEC_NAME.tar.gz)
-fi
-
-$RESIZE $CACHEDIR/$DIST_NAME/$UEC_NAME.img ${ROOTSIZE} $IMG_FILE_TMP
-mv $IMG_FILE_TMP $IMG_FILE
-
-# Copy kernel to destination
-if [ -n "$KERNEL" ]; then
-    cp -p $CACHEDIR/$DIST_NAME/*-vmlinuz-virtual $KERNEL
-fi
-
-trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT
diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh
deleted file mode 100755
index 9a4f036..0000000
--- a/tools/install_openvpn.sh
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/bin/bash
-
-# **install_openvpn.sh**
-
-# Install OpenVPN and generate required certificates
-#
-# install_openvpn.sh --client name
-# install_openvpn.sh --server [name]
-#
-# name is used as the CN of the generated cert, and as the filename of
-# the configuration, certificate and key files.
-#
-# --server mode configures the host with a running OpenVPN server instance
-# --client mode creates a tarball of a client configuration for this server
-
-# Get config file
-if [ -e localrc ]; then
-    . localrc
-fi
-if [ -e vpnrc ]; then
-    . vpnrc
-fi
-
-# Do some IP manipulation
-function cidr2netmask {
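-    # Positional trick: the list below is a shift count (5 - prefix/8,
-    # minimum 1), four 255 octets, the partial octet
-    # ((255 << (8 - prefix%8)) & 255), and three zeros; shifting leaves
-    # exactly the four octets of the netmask.
-    # e.g. cidr2netmask 25 -> 255.255.255.128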
-    set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0
-    if [[ $1 -gt 1 ]]; then
-        shift $1
-    else
-        shift
-    fi
-    echo ${1-0}.${2-0}.${3-0}.${4-0}
-}
-
-FIXED_NET=`echo $FIXED_RANGE | cut -d'/' -f1`
-FIXED_CIDR=`echo $FIXED_RANGE | cut -d'/' -f2`
-FIXED_MASK=`cidr2netmask $FIXED_CIDR`
-
-# VPN Config
-VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`}  # 50.56.12.212
-VPN_PROTO=${VPN_PROTO:-tcp}
-VPN_PORT=${VPN_PORT:-6081}
-VPN_DEV=${VPN_DEV:-tap0}
-VPN_BRIDGE=${VPN_BRIDGE:-br100}
-VPN_BRIDGE_IF=${VPN_BRIDGE_IF:-$FLAT_INTERFACE}
-VPN_CLIENT_NET=${VPN_CLIENT_NET:-$FIXED_NET}
-VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-$FIXED_MASK}
-VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-net.1 net.254}"
-
-VPN_DIR=/etc/openvpn
-CA_DIR=$VPN_DIR/easy-rsa
-
-function usage {
-    echo "$0 - OpenVPN install and certificate generation"
-    echo ""
-    echo "$0 --client name"
-    echo "$0 --server [name]"
-    echo ""
-    echo " --server mode configures the host with a running OpenVPN server instance"
-    echo " --client mode creates a tarball of a client configuration for this server"
-    exit 1
-}
-
-if [ -z $1 ]; then
-    usage
-fi
-
-# Install OpenVPN
-VPN_EXEC=`which openvpn`
-if [ -z "$VPN_EXEC" -o ! -x "$VPN_EXEC" ]; then
-    apt-get install -y openvpn bridge-utils
-fi
-if [ ! -d $CA_DIR ]; then
-    cp -pR /usr/share/doc/openvpn/examples/easy-rsa/2.0/ $CA_DIR
-fi
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/.. && pwd)
-
-WEB_DIR=$TOP_DIR/../vpn
-if [[ ! -d $WEB_DIR ]]; then
-    mkdir -p $WEB_DIR
-fi
-WEB_DIR=$(cd $TOP_DIR/../vpn && pwd)
-
-cd $CA_DIR
-source ./vars
-
-# Override the defaults
-export KEY_COUNTRY="US"
-export KEY_PROVINCE="TX"
-export KEY_CITY="SanAntonio"
-export KEY_ORG="Cloudbuilders"
-export KEY_EMAIL="rcb@lists.rackspace.com"
-
-if [ ! -r $CA_DIR/keys/dh1024.pem ]; then
-    # Initialize a new CA
-    $CA_DIR/clean-all
-    $CA_DIR/build-dh
-    $CA_DIR/pkitool --initca
-    openvpn --genkey --secret $CA_DIR/keys/ta.key  ## Build a TLS key
-fi
-
-function do_server {
-    NAME=$1
-    # Generate server certificate
-    $CA_DIR/pkitool --server $NAME
-
-    (cd $CA_DIR/keys;
-        cp $NAME.crt $NAME.key ca.crt dh1024.pem ta.key $VPN_DIR
-    )
-    cat >$VPN_DIR/br-up <<EOF
-#!/bin/bash
-
-BR="$VPN_BRIDGE"
-TAP="\$1"
-
-if [[ ! -d /sys/class/net/\$BR ]]; then
-    brctl addbr \$BR
-fi
-
-for t in \$TAP; do
-    openvpn --mktun --dev \$t
-    brctl addif \$BR \$t
-    ifconfig \$t 0.0.0.0 promisc up
-done
-EOF
-    chmod +x $VPN_DIR/br-up
-    cat >$VPN_DIR/br-down <<EOF
-#!/bin/bash
-
-BR="$VPN_BRIDGE"
-TAP="\$1"
-
-for i in \$TAP; do
-    brctl delif \$BR \$i
-    openvpn --rmtun --dev \$i
-done
-EOF
-    chmod +x $VPN_DIR/br-down
-    cat >$VPN_DIR/$NAME.conf <<EOF
-proto $VPN_PROTO
-port $VPN_PORT
-dev $VPN_DEV
-up $VPN_DIR/br-up
-down $VPN_DIR/br-down
-cert $NAME.crt
-key $NAME.key  # This file should be kept secret
-ca ca.crt
-dh dh1024.pem
-duplicate-cn
-server-bridge $VPN_CLIENT_NET $VPN_CLIENT_MASK $VPN_CLIENT_DHCP
-ifconfig-pool-persist ipp.txt
-comp-lzo
-user nobody
-group nogroup
-persist-key
-persist-tun
-status openvpn-status.log
-EOF
-    /etc/init.d/openvpn restart
-}
-
-function do_client {
-    NAME=$1
-    # Generate a client certificate
-    $CA_DIR/pkitool $NAME
-
-    TMP_DIR=`mktemp -d`
-    (cd $CA_DIR/keys;
-        cp -p ca.crt ta.key $NAME.key $NAME.crt $TMP_DIR
-    )
-    if [ -r $VPN_DIR/hostname ]; then
-        HOST=`cat $VPN_DIR/hostname`
-    else
-        HOST=`hostname`
-    fi
-    cat >$TMP_DIR/$HOST.conf <<EOF
-proto $VPN_PROTO
-port $VPN_PORT
-dev $VPN_DEV
-cert $NAME.crt
-key $NAME.key  # This file should be kept secret
-ca ca.crt
-client
-remote $VPN_SERVER $VPN_PORT
-resolv-retry infinite
-nobind
-user nobody
-group nogroup
-persist-key
-persist-tun
-comp-lzo
-verb 3
-EOF
-    (cd $TMP_DIR; tar cf $WEB_DIR/$NAME.tar *)
-    rm -rf $TMP_DIR
-    echo "Client certificate and configuration is in $WEB_DIR/$NAME.tar"
-}
-
-# Process command line args
-case $1 in
-    --client)   if [ -z $2 ]; then
-                    usage
-                fi
-                do_client $2
-                ;;
-    --server)   if [ -z $2 ]; then
-                    NAME=`hostname`
-                else
-                    NAME=$2
-                    # Save for --client use
-                    echo $NAME >$VPN_DIR/hostname
-                fi
-                do_server $NAME
-                ;;
-    --clean)    $CA_DIR/clean-all
-                ;;
-    *)          usage
-esac
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 150faaa..55ef93e 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -50,6 +50,25 @@
 }
 
 
+function configure_pypi_alternative_url {
+    PIP_ROOT_FOLDER="$HOME/.pip"
+    PIP_CONFIG_FILE="$PIP_ROOT_FOLDER/pip.conf"
+    if [[ ! -d $PIP_ROOT_FOLDER ]]; then
+        echo "Creating $PIP_ROOT_FOLDER"
+        mkdir $PIP_ROOT_FOLDER
+    fi
+    if [[ ! -f $PIP_CONFIG_FILE ]]; then
+        echo "Creating $PIP_CONFIG_FILE"
+        touch $PIP_CONFIG_FILE
+    fi
+    if ! ini_has_option "$PIP_CONFIG_FILE" "global" "index-url"; then
+        # index-url is not configured yet, so set it to the override
+        iniset "$PIP_CONFIG_FILE" "global" "index-url" "$PYPI_ALTERNATIVE_URL"
+    fi
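+    # The resulting pip.conf would then contain, for example (assuming
+    # PYPI_ALTERNATIVE_URL=http://mirror.example.org/simple):
+    #   [global]
+    #   index-url = http://mirror.example.org/simple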
+
+}
+
+
 # Show starting versions
 get_versions
 
@@ -60,6 +79,10 @@
 
 install_get_pip
 
+if [[ -n $PYPI_ALTERNATIVE_URL ]]; then
+    configure_pypi_alternative_url
+fi
+
 pip_install -U setuptools
 
 get_versions
diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md
deleted file mode 100644
index 3586da9..0000000
--- a/tools/jenkins/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-Getting Started With Jenkins and Devstack
-=========================================
-This little corner of devstack shows how to get an OpenStack jenkins
-environment up and running quickly, using the rcb configuration methodology.
-
-
-To create a jenkins server
---------------------------
-
-    cd tools/jenkins/jenkins_home
-    ./build_jenkins.sh
-
-This will create a jenkins environment configured with sample test scripts that run against xen and kvm.
-
-Configuring XS
---------------
-In order to make the tests for XS work, you must install xs 5.6 on a separate machine,
-and install the jenkins public key on that server.  You then need to create the
-/var/lib/jenkins/xenrc on your jenkins server like so:
-
-    MYSQL_PASSWORD=secrete
-    SERVICE_TOKEN=secrete
-    ADMIN_PASSWORD=secrete
-    RABBIT_PASSWORD=secrete
-    # This is the password for your guest (for both stack and root users)
-    GUEST_PASSWORD=secrete
-    # Do not download the usual images yet!
-    IMAGE_URLS=""
-    FLOATING_RANGE=192.168.1.224/28
-    VIRT_DRIVER=xenserver
-    # Explicitly set multi-host
-    MULTI_HOST=1
-    # Give extra time for boot
-    ACTIVE_TIMEOUT=45
-    #  IMPORTANT: This is the ip of your xenserver
-    XEN_IP=10.5.5.1
-    # IMPORTANT: The following must be set to your dom0 root password!
-    XENAPI_PASSWORD='MY_XEN_ROOT_PW'
diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh
deleted file mode 100755
index a7e635c..0000000
--- a/tools/jenkins/adapters/euca.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-# Echo commands, exit on error
-set -o xtrace
-set -o errexit
-
-TOP_DIR=$(cd ../../.. && pwd)
-HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
-die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
-ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises &&  ./euca.sh'
diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh
deleted file mode 100755
index 8da1eeb..0000000
--- a/tools/jenkins/adapters/floating_ips.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-# Echo commands, exit on error
-set -o xtrace
-set -o errexit
-
-TOP_DIR=$(cd ../../.. && pwd)
-HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
-die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
-ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises &&  ./floating_ips.sh'
diff --git a/tools/jenkins/adapters/swift.sh b/tools/jenkins/adapters/swift.sh
deleted file mode 100755
index c1362ee..0000000
--- a/tools/jenkins/adapters/swift.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-# Echo commands, exit on error
-set -o xtrace
-set -o errexit
-
-TOP_DIR=$(cd ../../.. && pwd)
-HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
-die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
-ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises &&  ./swift.sh'
diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh
deleted file mode 100755
index 0a0b6c0..0000000
--- a/tools/jenkins/adapters/volumes.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-# Echo commands, exit on error
-set -o xtrace
-set -o errexit
-
-TOP_DIR=$(cd ../../.. && pwd)
-HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
-die_if_not_set $LINENO HEAD_IP "Failure retrieving HEAD_IP"
-ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises &&  ./volumes.sh'
diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh
deleted file mode 100755
index 64ee159..0000000
--- a/tools/jenkins/build_configuration.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-EXECUTOR_NUMBER=$1
-CONFIGURATION=$2
-ADAPTER=$3
-RC=$4
-
-function usage {
-    echo "Usage: $0 -  Build a configuration"
-    echo ""
-    echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
-    exit 1
-}
-
-# Validate inputs
-if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = ""  || "$ADAPTER" = "" ]]; then
-    usage
-fi
-
-# Execute configuration script
-cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION $ADAPTER "$RC"
diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh
deleted file mode 100755
index 6927fd7..0000000
--- a/tools/jenkins/configurations/kvm.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-
-# exit on error to stop unexpected errors
-set -o errexit
-set -o xtrace
-
-EXECUTOR_NUMBER=$1
-CONFIGURATION=$2
-ADAPTER=$3
-RC=$4
-
-function usage {
-    echo "Usage: $0 - Build a test configuration"
-    echo ""
-    echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
-    exit 1
-}
-
-# Validate inputs
-if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = ""  || "$ADAPTER" = "" ]]; then
-    usage
-fi
-
-# This directory
-CUR_DIR=$(cd $(dirname "$0") && pwd)
-
-# devstack directory
-cd ../../..
-TOP_DIR=$(pwd)
-
-# Deps
-apt-get install -y --force-yes libvirt-bin || true
-
-# Name test instance based on executor
-BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER`
-GUEST_NAME=$BASE_NAME.$ADAPTER
-virsh list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh destroy || true
-virsh net-list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true
-
-# Configure localrc
-cat <<EOF >localrc
-RECLONE=yes
-GUEST_NETWORK=$EXECUTOR_NUMBER
-GUEST_NAME=$GUEST_NAME
-FLOATING_RANGE=192.168.$EXECUTOR_NUMBER.128/27
-GUEST_CORES=1
-GUEST_RAM=12574720
-MYSQL_PASSWORD=chicken
-RABBIT_PASSWORD=chicken
-SERVICE_TOKEN=chicken
-SERVICE_PASSWORD=chicken
-ADMIN_PASSWORD=chicken
-USERNAME=admin
-TENANT=admin
-NET_NAME=$BASE_NAME
-ACTIVE_TIMEOUT=45
-BOOT_TIMEOUT=45
-$RC
-EOF
-cd tools
-sudo ./build_uec.sh
-
-# Make the address of the instances available to test runners
-echo HEAD=`cat /var/lib/libvirt/dnsmasq/$BASE_NAME.leases | cut -d " " -f3` > $TOP_DIR/addresses
diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh
deleted file mode 100755
index 7b671e9..0000000
--- a/tools/jenkins/configurations/xs.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-set -o errexit
-set -o xtrace
-
-
-EXECUTOR_NUMBER=$1
-CONFIGURATION=$2
-ADAPTER=$3
-RC=$4
-
-function usage {
-    echo "Usage: $0 - Build a test configuration"
-    echo ""
-    echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
-    exit 1
-}
-
-# Validate inputs
-if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = ""  || "$ADAPTER" = "" ]]; then
-    usage
-fi
-
-# Configuration of xenrc
-XENRC=/var/lib/jenkins/xenrc
-if [ ! -e $XENRC ]; then
-    echo "/var/lib/jenkins/xenrc is not present! See README.md"
-    exit 1
-fi
-
-# Move to top of devstack
-cd ../../..
-
-# Use xenrc as the start of our localrc
-cp $XENRC localrc
-
-# Set the PUB_IP
-PUB_IP=192.168.1.1$EXECUTOR_NUMBER
-echo "PUB_IP=$PUB_IP" >> localrc
-
-# Overrides
-echo "$RC" >> localrc
-
-# Source localrc
-. localrc
-
-# Make host ip available to tester
-echo "HEAD=$PUB_IP" > addresses
-
-# Build configuration
-REMOTE_DEVSTACK=/root/devstack
-ssh root@$XEN_IP "rm -rf $REMOTE_DEVSTACK"
-scp -pr . root@$XEN_IP:$REMOTE_DEVSTACK
-ssh root@$XEN_IP "cd $REMOTE_DEVSTACK/tools/xen && ./build_domU.sh"
diff --git a/tools/jenkins/jenkins_home/.gitignore b/tools/jenkins/jenkins_home/.gitignore
deleted file mode 100644
index d831d01..0000000
--- a/tools/jenkins/jenkins_home/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-builds
-workspace
-*.sw*
diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh
deleted file mode 100755
index a556db0..0000000
--- a/tools/jenkins/jenkins_home/build_jenkins.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/bash
-
-# Echo commands, exit on error
-set -o xtrace
-set -o errexit
-
-# Make sure only root can run our script
-if [[ $EUID -ne 0 ]]; then
-    echo "This script must be run as root"
-    exit 1
-fi
-
-# This directory
-CUR_DIR=$(cd $(dirname "$0") && pwd)
-
-# Configure trunk jenkins!
-echo "deb http://pkg.jenkins-ci.org/debian binary/" > /etc/apt/sources.list.d/jenkins.list
-wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add -
-apt-get update
-
-
-# Clean out old jenkins - useful if you are having issues upgrading
-CLEAN_JENKINS=${CLEAN_JENKINS:-no}
-if [ "$CLEAN_JENKINS" = "yes" ]; then
-    apt-get remove jenkins jenkins-common
-fi
-
-# Install software
-DEPS="jenkins cloud-utils"
-apt-get install -y --force-yes $DEPS
-
-# Install jenkins
-if [ ! -e /var/lib/jenkins ]; then
-    echo "Jenkins installation failed"
-    exit 1
-fi
-
-# Make sure user has configured a jenkins ssh pubkey
-if [ ! -e /var/lib/jenkins/.ssh/id_rsa.pub ]; then
-    echo "Public key for jenkins is missing.  This is used to ssh into your instances."
-    echo "Please run "su -c ssh-keygen jenkins" before proceeding"
-    exit 1
-fi
-
-# Setup sudo
-JENKINS_SUDO=/etc/sudoers.d/jenkins
-cat > $JENKINS_SUDO <<EOF
-jenkins ALL = NOPASSWD: ALL
-EOF
-chmod 440 $JENKINS_SUDO
-
-# Setup .gitconfig
-JENKINS_GITCONF=/var/lib/jenkins/hudson.plugins.git.GitSCM.xml
-cat > $JENKINS_GITCONF <<EOF
-<?xml version='1.0' encoding='UTF-8'?>
-<hudson.plugins.git.GitSCM_-DescriptorImpl>
-  <generation>4</generation>
-  <globalConfigName>Jenkins</globalConfigName>
-  <globalConfigEmail>jenkins@rcb.me</globalConfigEmail>
-</hudson.plugins.git.GitSCM_-DescriptorImpl>
-EOF
-
-# Add build numbers
-JOBS=`ls jobs`
-for job in ${JOBS// / }; do
-    if [ ! -e jobs/$job/nextBuildNumber ]; then
-        echo 1 > jobs/$job/nextBuildNumber
-    fi
-done
-
-# Set ownership to jenkins
-chown -R jenkins $CUR_DIR
-
-# Make sure this directory is accessible to jenkins
-if ! su -c "ls $CUR_DIR" jenkins; then
-    echo "Your devstack directory is not accessible by jenkins."
-    echo "There is a decent chance you are trying to run this from a directory in /root."
-    echo "If so, try moving devstack elsewhere (eg. /opt/devstack)."
-    exit 1
-fi
-
-# Move aside old jobs, if present
-if [ ! -h /var/lib/jenkins/jobs ]; then
-    echo "Installing jobs symlink"
-    if [ -d /var/lib/jenkins/jobs ]; then
-        mv /var/lib/jenkins/jobs /var/lib/jenkins/jobs.old
-    fi
-fi
-
-# Set up jobs symlink
-rm -f /var/lib/jenkins/jobs
-ln -s $CUR_DIR/jobs /var/lib/jenkins/jobs
-
-# List of plugins
-PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.hpi,http://mirrors.jenkins-ci.org/plugins/git/1.1.12/git.hpi,http://hudson-ci.org/downloads/plugins/global-build-stats/1.2/global-build-stats.hpi,http://hudson-ci.org/downloads/plugins/greenballs/1.10/greenballs.hpi,http://download.hudson-labs.org/plugins/console-column-plugin/1.0/console-column-plugin.hpi
-
-# Configure plugins
-for plugin in ${PLUGINS//,/ }; do
-    name=`basename $plugin`
-    dest=/var/lib/jenkins/plugins/$name
-    if [ ! -e $dest ]; then
-        curl -L $plugin -o $dest
-    fi
-done
-
-# Restart jenkins
-/etc/init.d/jenkins stop || true
-/etc/init.d/jenkins start
diff --git a/tools/jenkins/jenkins_home/clean.sh b/tools/jenkins/jenkins_home/clean.sh
deleted file mode 100755
index eb03022..0000000
--- a/tools/jenkins/jenkins_home/clean.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-# This script is not yet for general consumption.
-
-set -o errexit
-
-if [ ! "$FORCE" = "yes" ]; then
-    echo "FORCE not set to 'yes'.  Make sure this is something you really want to do.  Exiting."
-    exit 1
-fi
-
-virsh list | cut -d " " -f1 | grep -v "-" | egrep -e "[0-9]" | xargs -n 1 virsh destroy || true
-virsh net-list | grep active | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true
-killall dnsmasq || true
-if [ "$CLEAN" = "yes" ]; then
-    rm -rf jobs
-fi
-rm /var/lib/jenkins/jobs
-git checkout -f
-git fetch
-git merge origin/jenkins
-./build_jenkins.sh
diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml
deleted file mode 100644
index 94c51f5..0000000
--- a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml
+++ /dev/null
@@ -1,82 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<matrix-project>
-  <actions/>
-  <description></description>
-  <keepDependencies>false</keepDependencies>
-  <properties>
-    <hudson.model.ParametersDefinitionProperty>
-      <parameterDefinitions>
-        <hudson.model.StringParameterDefinition>
-          <name>RC</name>
-          <description></description>
-          <defaultValue></defaultValue>
-        </hudson.model.StringParameterDefinition>
-      </parameterDefinitions>
-    </hudson.model.ParametersDefinitionProperty>
-  </properties>
-  <scm class="hudson.plugins.git.GitSCM">
-    <configVersion>2</configVersion>
-    <userRemoteConfigs>
-      <hudson.plugins.git.UserRemoteConfig>
-        <name>origin</name>
-        <refspec>+refs/heads/*:refs/remotes/origin/*</refspec>
-        <url>git://github.com/cloudbuilders/devstack.git</url>
-      </hudson.plugins.git.UserRemoteConfig>
-    </userRemoteConfigs>
-    <branches>
-      <hudson.plugins.git.BranchSpec>
-        <name>master</name>
-      </hudson.plugins.git.BranchSpec>
-    </branches>
-    <recursiveSubmodules>false</recursiveSubmodules>
-    <doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
-    <authorOrCommitter>false</authorOrCommitter>
-    <clean>false</clean>
-    <wipeOutWorkspace>false</wipeOutWorkspace>
-    <pruneBranches>false</pruneBranches>
-    <remotePoll>false</remotePoll>
-    <buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
-    <gitTool>Default</gitTool>
-    <submoduleCfg class="list"/>
-    <relativeTargetDir></relativeTargetDir>
-    <excludedRegions></excludedRegions>
-    <excludedUsers></excludedUsers>
-    <gitConfigName></gitConfigName>
-    <gitConfigEmail></gitConfigEmail>
-    <skipTag>false</skipTag>
-    <scmName></scmName>
-  </scm>
-  <canRoam>true</canRoam>
-  <disabled>false</disabled>
-  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
-  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
-  <triggers class="vector"/>
-  <concurrentBuild>false</concurrentBuild>
-  <axes>
-    <hudson.matrix.TextAxis>
-      <name>ADAPTER</name>
-      <values>
-        <string>euca</string>
-        <string>floating_ips</string>
-      </values>
-    </hudson.matrix.TextAxis>
-  </axes>
-  <builders>
-    <hudson.tasks.Shell>
-      <command>sed -i &apos;s/) 2&gt;&amp;1 | tee &quot;${LOGFILE}&quot;/)/&apos; stack.sh</command>
-    </hudson.tasks.Shell>
-    <hudson.tasks.Shell>
-      <command>set -o errexit
-cd tools/jenkins
-sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm $ADAPTER &quot;$RC&quot;</command>
-    </hudson.tasks.Shell>
-    <hudson.tasks.Shell>
-      <command>set -o errexit
-cd tools/jenkins
-./run_test.sh $EXECUTOR_NUMBER $ADAPTER $RC &quot;$RC&quot;</command>
-    </hudson.tasks.Shell>
-  </builders>
-  <publishers/>
-  <buildWrappers/>
-  <runSequentially>false</runSequentially>
-</matrix-project>
diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml
deleted file mode 100644
index 0be70a5..0000000
--- a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<matrix-config>
-  <keepDependencies>false</keepDependencies>
-  <properties/>
-  <scm class="hudson.scm.NullSCM"/>
-  <canRoam>false</canRoam>
-  <disabled>false</disabled>
-  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
-  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
-  <triggers class="vector"/>
-  <concurrentBuild>false</concurrentBuild>
-  <builders/>
-  <publishers/>
-  <buildWrappers/>
-</matrix-config>
\ No newline at end of file
diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml
deleted file mode 100644
index 0be70a5..0000000
--- a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<matrix-config>
-  <keepDependencies>false</keepDependencies>
-  <properties/>
-  <scm class="hudson.scm.NullSCM"/>
-  <canRoam>false</canRoam>
-  <disabled>false</disabled>
-  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
-  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
-  <triggers class="vector"/>
-  <concurrentBuild>false</concurrentBuild>
-  <builders/>
-  <publishers/>
-  <buildWrappers/>
-</matrix-config>
\ No newline at end of file
diff --git a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml
deleted file mode 100644
index 49a57f0..0000000
--- a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<matrix-project>
-  <actions/>
-  <description>In order for this to work, you must create a /var/lib/jenkins/xenrc file as described in README.md</description>
-  <keepDependencies>false</keepDependencies>
-  <properties>
-    <hudson.model.ParametersDefinitionProperty>
-      <parameterDefinitions>
-        <hudson.model.StringParameterDefinition>
-          <name>RC</name>
-          <description></description>
-          <defaultValue></defaultValue>
-        </hudson.model.StringParameterDefinition>
-      </parameterDefinitions>
-    </hudson.model.ParametersDefinitionProperty>
-  </properties>
-  <scm class="hudson.plugins.git.GitSCM">
-    <configVersion>2</configVersion>
-    <userRemoteConfigs>
-      <hudson.plugins.git.UserRemoteConfig>
-        <name>origin</name>
-        <refspec>+refs/heads/*:refs/remotes/origin/*</refspec>
-        <url>git://github.com/cloudbuilders/devstack.git</url>
-      </hudson.plugins.git.UserRemoteConfig>
-    </userRemoteConfigs>
-    <branches>
-      <hudson.plugins.git.BranchSpec>
-        <name>master</name>
-      </hudson.plugins.git.BranchSpec>
-    </branches>
-    <recursiveSubmodules>false</recursiveSubmodules>
-    <doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
-    <authorOrCommitter>false</authorOrCommitter>
-    <clean>false</clean>
-    <wipeOutWorkspace>false</wipeOutWorkspace>
-    <pruneBranches>false</pruneBranches>
-    <remotePoll>false</remotePoll>
-    <buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
-    <gitTool>Default</gitTool>
-    <submoduleCfg class="list"/>
-    <relativeTargetDir></relativeTargetDir>
-    <excludedRegions></excludedRegions>
-    <excludedUsers></excludedUsers>
-    <gitConfigName></gitConfigName>
-    <gitConfigEmail></gitConfigEmail>
-    <skipTag>false</skipTag>
-    <scmName></scmName>
-  </scm>
-  <canRoam>true</canRoam>
-  <disabled>false</disabled>
-  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
-  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
-  <triggers class="vector"/>
-  <concurrentBuild>false</concurrentBuild>
-  <axes>
-    <hudson.matrix.TextAxis>
-      <name>ADAPTER</name>
-      <values>
-        <string>euca</string>
-        <string>floating_ips</string>
-      </values>
-    </hudson.matrix.TextAxis>
-  </axes>
-  <builders>
-    <hudson.tasks.Shell>
-      <command>sed -i &apos;s/) 2&gt;&amp;1 | tee &quot;${LOGFILE}&quot;/)/&apos; stack.sh</command>
-    </hudson.tasks.Shell>
-    <hudson.tasks.Shell>
-      <command>set -o errexit
-cd tools/jenkins
-sudo ./build_configuration.sh $EXECUTOR_NUMBER xs $ADAPTER &quot;$RC&quot;</command>
-    </hudson.tasks.Shell>
-    <hudson.tasks.Shell>
-      <command>#!/bin/bash
-set -o errexit
-set -o xtrace
-
-. localrc
-
-# Unlike kvm, ssh to the xen host to run tests, in case the test instance is launched with a host-only network
-ssh root@$XEN_IP &quot;cd devstack &amp;&amp; . localrc &amp;&amp; cd tools/jenkins &amp;&amp; ./run_test.sh $EXECUTOR_NUMBER $ADAPTER &apos;$RC&apos;&quot;
-</command>
-    </hudson.tasks.Shell>
-  </builders>
-  <publishers/>
-  <buildWrappers/>
-  <runSequentially>true</runSequentially>
-</matrix-project>
diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py
deleted file mode 100755
index 8be500b..0000000
--- a/tools/jenkins/jenkins_home/print_summary.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/python
-
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import json
-import sys
-import urllib
-
-
-def print_usage():
-    print("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
-          % sys.argv[0])
-    sys.exit()
-
-
-def fetch_blob(url):
-    return json.loads(urllib.urlopen(url + '/api/json').read())
-
-
-if len(sys.argv) < 2:
-    print_usage()
-
-BASE_URL = sys.argv[1]
-
-root = fetch_blob(BASE_URL)
-results = {}
-for job_url in root['jobs']:
-    job = fetch_blob(job_url['url'])
-    if job.get('activeConfigurations'):
-        (tag, name) = job['name'].split('-')
-        if not results.get(tag):
-            results[tag] = {}
-        if not results[tag].get(name):
-            results[tag][name] = []
-
-        for config_url in job['activeConfigurations']:
-            config = fetch_blob(config_url['url'])
-
-            log_url = ''
-            if config.get('lastBuild'):
-                log_url = config['lastBuild']['url'] + 'console'
-
-            results[tag][name].append({'test': config['displayName'],
-                                       'status': config['color'],
-                                       'logUrl': log_url,
-                                       'healthReport': config['healthReport']})
-
-print(json.dumps(results))
diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh
deleted file mode 100755
index d2b8284..0000000
--- a/tools/jenkins/run_test.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-EXECUTOR_NUMBER=$1
-ADAPTER=$2
-RC=$3
-
-function usage {
-    echo "Usage: $0 - Run a test"
-    echo ""
-    echo "$0 [EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]"
-    exit 1
-}
-
-# Validate inputs
-if [[ "$EXECUTOR_NUMBER" = "" || "$ADAPTER" = "" ]]; then
-    usage
-fi
-
-# Execute configuration script
-cd adapters && ./$ADAPTER.sh $EXECUTOR_NUMBER $ADAPTER "$RC"
diff --git a/tools/warm_apts_for_uec.sh b/tools/warm_apts_for_uec.sh
deleted file mode 100755
index c57fc2e..0000000
--- a/tools/warm_apts_for_uec.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-
-# **warm_apts_for_uec.sh**
-
-# Echo commands
-set -o xtrace
-
-# Exit on error to stop unexpected errors
-set -o errexit
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=`cd $TOOLS_DIR/..; pwd`
-
-# Change dir to top of devstack
-cd $TOP_DIR
-
-# Echo usage
-function usage {
-    echo "Cache OpenStack dependencies on a uec image to speed up performance."
-    echo ""
-    echo "Usage: $0 [full path to raw uec base image]"
-}
-
-# Make sure this is a raw image
-if ! qemu-img info $1 | grep -q "file format: raw"; then
-    usage
-    exit 1
-fi
-
-# Make sure we are in the correct dir
-if [ ! -d files/apts ]; then
-    echo "Please run this script from devstack/tools/"
-    exit 1
-fi
-
-# Mount the image
-STAGING_DIR=/tmp/`echo $1 | sed  "s/\//_/g"`.stage
-mkdir -p $STAGING_DIR
-umount $STAGING_DIR || true
-sleep 1
-mount -t ext4 -o loop $1 $STAGING_DIR
-
-# Make sure that base requirements are installed
-cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
-
-# Perform caching on the base image to speed up subsequent runs
-chroot $STAGING_DIR apt-get update
-chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
-chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
-
-# Unmount
-umount $STAGING_DIR
diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg
index 6a1ae89..94e6e96 100644
--- a/tools/xen/devstackubuntupreseed.cfg
+++ b/tools/xen/devstackubuntupreseed.cfg
@@ -297,9 +297,9 @@
 ### Apt setup
 # You can choose to install restricted and universe software, or to install
 # software from the backports repository.
-#d-i apt-setup/restricted boolean true
-#d-i apt-setup/universe boolean true
-#d-i apt-setup/backports boolean true
+d-i apt-setup/restricted boolean true
+d-i apt-setup/universe boolean true
+d-i apt-setup/backports boolean true
 # Uncomment this if you don't want to use a network mirror.
 #d-i apt-setup/use_mirror boolean false
 # Select which update services to use; define the mirrors to be used.
@@ -366,7 +366,7 @@
 # With a few exceptions for unusual partitioning setups, GRUB 2 is now the
 # default. If you need GRUB Legacy for some particular reason, then
 # uncomment this:
-#d-i grub-installer/grub2_instead_of_grub_legacy boolean false
+d-i grub-installer/grub2_instead_of_grub_legacy boolean false
 
 # This is fairly safe to set, it makes grub install automatically to the MBR
 # if no other operating system is detected on the machine.
diff --git a/tools/xen/functions b/tools/xen/functions
index ab0be84..4317796 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -93,7 +93,7 @@
     echo $pbd_path
 }
 
-function find_ip_by_name() {
+function find_ip_by_name {
     local guest_name="$1"
     local interface="$2"
 
@@ -121,7 +121,7 @@
     done
 }
 
-function _vm_uuid() {
+function _vm_uuid {
     local vm_name_label
 
     vm_name_label="$1"
@@ -129,14 +129,14 @@
     xe vm-list name-label="$vm_name_label" --minimal
 }
 
-function _create_new_network() {
+function _create_new_network {
     local name_label
     name_label=$1
 
     xe network-create name-label="$name_label"
 }
 
-function _multiple_networks_with_name() {
+function _multiple_networks_with_name {
     local name_label
     name_label=$1
 
@@ -144,21 +144,21 @@
     xe network-list name-label="$name_label" --minimal | grep -q ","
 }
 
-function _network_exists() {
+function _network_exists {
     local name_label
     name_label=$1
 
     ! [ -z "$(xe network-list name-label="$name_label" --minimal)" ]
 }
 
-function _bridge_exists() {
+function _bridge_exists {
     local bridge
     bridge=$1
 
     ! [ -z "$(xe network-list bridge="$bridge" --minimal)" ]
 }
 
-function _network_uuid() {
+function _network_uuid {
     local bridge_or_net_name
     bridge_or_net_name=$1
 
@@ -169,7 +169,7 @@
     fi
 }
 
-function add_interface() {
+function add_interface {
     local vm_name_label
     local bridge_or_network_name
 
@@ -185,7 +185,7 @@
     xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number
 }
 
-function setup_network() {
+function setup_network {
     local bridge_or_net_name
     bridge_or_net_name=$1
 
@@ -204,7 +204,7 @@
     fi
 }
 
-function bridge_for() {
+function bridge_for {
     local bridge_or_net_name
     bridge_or_net_name=$1
 
@@ -215,29 +215,28 @@
     fi
 }
 
-function xenapi_ip_on() {
+function xenapi_ip_on {
     local bridge_or_net_name
     bridge_or_net_name=$1
 
     ifconfig $(bridge_for "$bridge_or_net_name") | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"
 }
 
-function xenapi_is_listening_on() {
+function xenapi_is_listening_on {
     local bridge_or_net_name
     bridge_or_net_name=$1
 
     ! [ -z $(xenapi_ip_on "$bridge_or_net_name") ]
 }
 
-function parameter_is_specified() {
+function parameter_is_specified {
     local parameter_name
     parameter_name=$1
 
     compgen -v | grep "$parameter_name"
 }
 
-function append_kernel_cmdline()
-{
+function append_kernel_cmdline {
     local vm_name_label
     local kernel_args
 
@@ -252,8 +251,7 @@
     xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm
 }
 
-function destroy_all_vifs_of()
-{
+function destroy_all_vifs_of {
     local vm_name_label
 
     vm_name_label="$1"
@@ -268,11 +266,11 @@
     unset IFS
 }
 
-function have_multiple_hosts() {
+function have_multiple_hosts {
     xe host-list --minimal | grep -q ","
 }
 
-function attach_network() {
+function attach_network {
     local bridge_or_net_name
 
     bridge_or_net_name="$1"
@@ -286,7 +284,7 @@
     xe network-attach uuid=$net host-uuid=$host
 }
 
-function set_vm_memory() {
+function set_vm_memory {
     local vm_name_label
     local memory
 
@@ -305,7 +303,7 @@
         uuid=$vm
 }
 
-function max_vcpus() {
+function max_vcpus {
     local vm_name_label
 
     vm_name_label="$1"
@@ -337,7 +335,7 @@
     xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count
 }
 
-function get_domid() {
+function get_domid {
     local vm_name_label
 
     vm_name_label="$1"
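Note: the hunks above normalize every declaration in tools/xen/functions to
the "function name {" form, presumably to satisfy the function-declaration
check of the bashate environment added in tox.ini below. For illustration, a
minimal sketch of the one style kept versus the styles removed:

    # Style this patch standardizes on:
    function example_name {
        echo "body"
    }
    # Styles removed by the hunks above:
    #   function example_name() {     parentheses after the name
    #   function example_name()
    #   {                             opening brace on its own line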
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 44e8dc1..9bf8f73 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -171,6 +171,7 @@
     echo "Waiting for the VM to halt.  Progress in-VM can be checked with vncviewer:"
     mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
     domid=$(get_domid "$GUEST_NAME")
+    sleep 20 # Wait for the vnc-port to be written
     port=$(xenstore-read /local/domain/$domid/console/vnc-port)
     echo "vncviewer -via root@$mgmt_ip localhost:${port:2}"
     while true; do
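Note: the fixed sleep gives xenstore time to record the console's VNC port
before it is read back. A polling loop is a common alternative; a sketch of
that approach (an assumption, not what this patch does):

    # Hypothetical variant: poll xenstore for up to ~20s instead of
    # sleeping; xenstore-read fails until the key exists.
    for _ in $(seq 1 20); do
        port=$(xenstore-read /local/domain/$domid/console/vnc-port 2>/dev/null) \
            && break
        sleep 1
    done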
@@ -207,6 +208,8 @@
             -e "s,\(d-i mirror/http/hostname string\).*,\1 $UBUNTU_INST_HTTP_HOSTNAME,g" \
             -e "s,\(d-i mirror/http/directory string\).*,\1 $UBUNTU_INST_HTTP_DIRECTORY,g" \
             -e "s,\(d-i mirror/http/proxy string\).*,\1 $UBUNTU_INST_HTTP_PROXY,g" \
+            -e "s,\(d-i passwd/root-password password\).*,\1 $GUEST_PASSWORD,g" \
+            -e "s,\(d-i passwd/root-password-again password\).*,\1 $GUEST_PASSWORD,g" \
             -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg"
     fi
 
@@ -382,10 +385,16 @@
     while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "service devstack status | grep -q running"; do
         sleep 10
     done
-    echo -n "devstack is running"
+    echo -n "devstack service is running, waiting for stack.sh to start logging..."
+
+    while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "test -e /tmp/devstack/log/stack.log"; do
+        sleep 10
+    done
     set -x
 
-    # Watch devstack's output
+    # Watch devstack's output. The log doesn't appear until stack.sh is
+    # running, but wait for run.sh (which starts stack.sh) to exit, as that
+    # is what hopefully writes the succeeded cookie.
     pid=`ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS pgrep run.sh`
     ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "tail --pid $pid -n +1 -f /tmp/devstack/log/stack.log"
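Note: the waiting logic above relies on GNU tail's --pid flag: tail follows
the log but exits on its own once the watched process dies. Stripped of the
ssh wrapping, the idiom is:

    # Print stack.log from its first line and follow it, but only for as
    # long as run.sh (which started stack.sh) is still alive.
    pid=$(pgrep run.sh)
    tail --pid "$pid" -n +1 -f /tmp/devstack/log/stack.log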
 
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 2b5e418..cd189db 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -74,6 +74,7 @@
 apt-get update
 apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
 apt-get install -y curl wget ssh openssh-server python-pip git sudo python-netaddr
+apt-get install -y coreutils
 pip install xenapi
 
 # Install XenServer guest utilities
diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh
index 2441e3d..2846dc4 100755
--- a/tools/xen/scripts/on_exit.sh
+++ b/tools/xen/scripts/on_exit.sh
@@ -3,7 +3,9 @@
 set -e
 set -o xtrace
 
-declare -a on_exit_hooks
+if [ -z "${on_exit_hooks:-}" ]; then
+    on_exit_hooks=()
+fi
 
 on_exit()
 {
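Note: the guard makes on_exit.sh safe to source more than once: an
already-populated on_exit_hooks array is left alone rather than re-declared,
so hooks registered by an earlier source survive. A minimal sketch of the
hook pattern this file implements (add_on_exit is assumed to be defined later
in the file; only on_exit itself appears in this hunk):

    # Sketch under the assumptions above; not a verbatim copy of the file.
    if [ -z "${on_exit_hooks:-}" ]; then
        on_exit_hooks=()
    fi
    on_exit()
    {
        for hook in "${on_exit_hooks[@]}"; do
            eval "$hook"
        done
    }
    add_on_exit()
    {
        on_exit_hooks+=("$*")
        trap on_exit EXIT
    }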
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 278bb9b..510c5f9 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -63,15 +63,15 @@
 PUB_NETMASK=${PUB_NETMASK:-255.255.255.0}
 
 # Ubuntu install settings
-UBUNTU_INST_RELEASE="saucy"
-UBUNTU_INST_TEMPLATE_NAME="Ubuntu 13.10 (64-bit) for DevStack"
+UBUNTU_INST_RELEASE="trusty"
+UBUNTU_INST_TEMPLATE_NAME="Ubuntu 14.04 (64-bit) for DevStack"
 # For 12.04 use "precise" and update template name
 # However, for 12.04, you should be using
 # XenServer 6.1 and later or XCP 1.6 or later
 # 11.10 is only really supported with XenServer 6.0.2 and later
 UBUNTU_INST_ARCH="amd64"
-UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.net"
-UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
+UBUNTU_INST_HTTP_HOSTNAME="mirror.anl.gov"
+UBUNTU_INST_HTTP_DIRECTORY="/pub/ubuntu"
 UBUNTU_INST_HTTP_PROXY=""
 UBUNTU_INST_LOCALE="en_US"
 UBUNTU_INST_KEYBOARD="us"
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..325adae
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,16 @@
+[tox]
+minversion = 1.6
+skipsdist = True
+envlist = bashate
+
+[testenv]
+usedevelop = False
+install_command = pip install {opts} {packages}
+
+[testenv:bashate]
+deps = bashate
+whitelist_externals = bash
+commands = bash -c "find {toxinidir} -not -wholename \*.tox/\* -and \( -name \*.sh -or -name \*rc -or -name functions\* -or \( -wholename lib/\* -and -not -name \*.md \) \) -print0 | xargs -0 bashate -v"
+
+[testenv:docs]
+commands = python setup.py build_sphinx
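Note: with this tox.ini in place, the tree-wide shell style check can be run
locally with a single command (assuming tox >= 1.6, per minversion, is
installed):

    pip install tox
    tox -e bashate    # equivalent to plain `tox`, since envlist = bashate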
diff --git a/unstack.sh b/unstack.sh
index a5e7b87..adb6dc1 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -55,6 +55,7 @@
 source $TOP_DIR/lib/neutron
 source $TOP_DIR/lib/baremetal
 source $TOP_DIR/lib/ldap
+source $TOP_DIR/lib/dstat
 
 # Extras Source
 # --------------
@@ -122,9 +123,10 @@
     stop_horizon
 fi
 
-# Kill TLS proxies
+# Kill TLS proxies and cleanup certificates
 if is_service_enabled tls-proxy; then
-    killall stud
+    stop_tls_proxy
+    cleanup_CA
 fi
 
 SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*
@@ -161,6 +163,8 @@
     cleanup_trove
 fi
 
+stop_dstat
+
 # Clean up the remainder of the screen processes
 SCREEN=$(which screen)
 if [[ -n "$SCREEN" ]]; then