Merge "Declare the config attribute in a simpler way"
diff --git a/tempest/README.rst b/tempest/README.rst
new file mode 100644
index 0000000..c41ef96
--- /dev/null
+++ b/tempest/README.rst
@@ -0,0 +1,98 @@
+Tempest Field Guide
+-------------------
+
+Tempest is designed to be useful for a large number of different
+environments. This includes gating commits to OpenStack core
+projects, validating OpenStack cloud implementations for correctness,
+and serving as a burn-in tool for OpenStack clouds.
+
+As such, Tempest tests come in many flavors, each with its own rules
+and guidelines. Below is the proposed Havana restructuring for Tempest
+to make this clear.
+
+tempest/
+   3rdparty/ - 3rd party API tests
+   api/ - API tests
+   cli/ - CLI tests
+   scenario/ - complex scenario tests
+   stress/ - stress tests
+   whitebox/ - white box testing
+
+Each of these directories contains different types of tests. What
+belongs in each directory, along with the rules and examples for good
+tests, is documented in a README.rst file in that directory.
+
+
+3rdparty
+------------
+
+Many OpenStack components include 3rdparty API support. It is
+completely legitimate for Tempest to include tests of 3rdparty APIs,
+but those should be kept separate from the normal OpenStack
+validation.
+
+TODO: tempest/tests/boto should become tempest/3rdparty/boto
+
+
+api
+------------
+
+API tests are validation tests for the OpenStack API. They should not
+use the existing python clients for OpenStack, but should instead use
+the Tempest implementations of the clients. This allows us to test
+both XML and JSON. Having raw clients also lets us pass invalid JSON
+and XML to the APIs and see the results, something we could not do
+with the native clients.
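+
+For example, a raw client can POST a deliberately malformed body and
+assert on the error response. A minimal sketch (the endpoint and token
+are placeholders, not values from this repository)::
+
+    import httplib2
+
+    http = httplib2.Http()
+    headers = {'Content-Type': 'application/json',
+               'X-Auth-Token': '<token>'}
+    # Malformed JSON (trailing comma) that a native client could never send.
+    resp, body = http.request('http://example.com:8774/v2/servers', 'POST',
+                              headers=headers,
+                              body='{"server": {"name": "x",}}')
+    assert resp.status == 400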
+
+When it makes sense, API testing should be moved closer to the
+projects themselves, possibly as functional tests in their unit test
+frameworks.
+
+TODO: The bulk of tempest/tests should move to tempest/api
+
+
+cli
+------------
+
+CLI tests use the OpenStack CLI to interact with the OpenStack
+cloud. CLI testing in unit tests is somewhat difficult because,
+unlike server testing, there is no server code to instantiate.
+Tempest is a logical place for this, as it already requires a running
+OpenStack cloud.
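+
+A minimal sketch of the pattern (assumes the ``nova`` CLI is installed
+and the usual ``OS_*`` credential variables are exported)::
+
+    import subprocess
+
+    # Shell out to the CLI exactly as a user would; check_output raises
+    # on a non-zero exit code.
+    output = subprocess.check_output(['nova', 'list'])
+    assert 'ID' in output  # table header of a successful listing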
+
+TODO: the top level cli directory moves to tempest/cli
+
+
+scenario
+------------
+
+Scenario tests are complex "through path" tests for OpenStack
+functionality. They are typically a series of steps in which
+complicated state requiring multiple services is set up, exercised,
+and torn down.
+
+Scenario tests can and should use the OpenStack python clients.
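+
+For instance, a scenario step can boot a server through the official
+python-novaclient. A minimal sketch (credentials, image and flavor are
+placeholders)::
+
+    from novaclient.v1_1 import client
+
+    nova = client.Client('demo', 'secret', 'demo',
+                         'http://example.com:5000/v2.0')
+    # Boot a server; later steps would exercise and then tear it down.
+    server = nova.servers.create('scenario-vm', '<image-id>', '<flavor-id>')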
+
+TODO: tests/network/test_network_basic_ops.py and
+tests/compute/servers/*_ops.py should move to tempest/scenario (among
+others).
+
+
+stress
+-----------
+
+Stress tests are designed to stress an OpenStack environment by
+running a high workload against it and seeing what breaks. Tools may
+be provided to help detect failures (e.g. stack traces in the logs).
+
+TODO: the old stress tests should be deleted, and the new_stress work
+that David is working on should move in here.
+
+
+whitebox
+----------
+
+Whitebox tests are tests which require access to the database of the
+target OpenStack machine to verify internal state after operations
+are made. Whitebox tests are allowed to use the python clients.
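+
+A minimal sketch of the idea (the connection string and uuid are
+placeholders for the target machine's nova database)::
+
+    from sqlalchemy import create_engine, text
+
+    engine = create_engine('mysql://nova:secret@example.com/nova')
+    # Verify internal state directly in the database.
+    row = engine.execute(text('SELECT vm_state FROM instances '
+                              'WHERE uuid = :uuid'),
+                         uuid='<instance-uuid>').first()
+    assert row.vm_state == 'active'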
+
+TODO: collect our whitebox tests into this location.
diff --git a/tempest/clients.py b/tempest/clients.py
index 7b1e5cc..9b2c1f5 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -42,6 +42,7 @@
 from tempest.services.compute.json.servers_client import ServersClientJSON
 from tempest.services.compute.json.volumes_extensions_client import \
     VolumesExtensionsClientJSON
+from tempest.services.compute.xml.aggregates_client import AggregatesClientXML
 from tempest.services.compute.xml.availability_zone_client import \
     AvailabilityZoneClientXML
 from tempest.services.compute.xml.extensions_client import ExtensionsClientXML
@@ -201,6 +202,11 @@
     "xml": ServiceClientXML,
 }
 
+AGGREGATES_CLIENT = {
+    "json": AggregatesClientJSON,
+    "xml": AggregatesClientXML,
+}
+
 
 class Manager(object):
 
@@ -270,6 +276,7 @@
             self.availability_zone_client = \
                 AVAILABILITY_ZONE_CLIENT[interface](*client_args)
             self.service_client = SERVICE_CLIENT[interface](*client_args)
+            self.aggregates_client = AGGREGATES_CLIENT[interface](*client_args)
         except KeyError:
             msg = "Unsupported interface type `%s'" % interface
             raise exceptions.InvalidConfiguration(msg)
@@ -285,7 +292,6 @@
         self.custom_object_client = ObjectClientCustomizedHeader(*client_args)
         self.custom_account_client = \
             AccountClientCustomizedHeader(*client_args)
-        self.aggregates_client = AggregatesClientJSON(*client_args)
 
 
 class AltManager(Manager):
diff --git a/tempest/services/compute/xml/aggregates_client.py b/tempest/services/compute/xml/aggregates_client.py
new file mode 100644
index 0000000..0ef8e22
--- /dev/null
+++ b/tempest/services/compute/xml/aggregates_client.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 NEC Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from lxml import etree
+
+from tempest.common.rest_client import RestClientXML
+from tempest import exceptions
+from tempest.services.compute.xml.common import Document
+from tempest.services.compute.xml.common import Element
+from tempest.services.compute.xml.common import xml_to_json
+
+
+class AggregatesClientXML(RestClientXML):
+
+    def __init__(self, config, username, password, auth_url, tenant_name=None):
+        super(AggregatesClientXML, self).__init__(config, username, password,
+                                                  auth_url, tenant_name)
+        self.service = self.config.compute.catalog_type
+
+    def _format_aggregate(self, g):
+        agg = xml_to_json(g)
+        aggregate = {}
+        for key, value in agg.items():
+            if key == 'hosts':
+                # xml_to_json renders the <hosts> element as a dict of its
+                # children; flatten it back into the list the JSON API uses.
+                aggregate['hosts'] = []
+                for k, v in value.items():
+                    aggregate['hosts'].append(v)
+            elif key == 'availability_zone':
+                # The XML body serializes a null availability zone as the
+                # string 'None'; normalize it back to a real None.
+                aggregate[key] = None if value == 'None' else value
+            else:
+                aggregate[key] = value
+        return aggregate
+
+    def _parse_array(self, node):
+        return [self._format_aggregate(x) for x in node]
+
+    def list_aggregates(self):
+        """Get aggregate list."""
+        resp, body = self.get("os-aggregates", self.headers)
+        aggregates = self._parse_array(etree.fromstring(body))
+        return resp, aggregates
+
+    def get_aggregate(self, aggregate_id):
+        """Get details of the given aggregate."""
+        resp, body = self.get("os-aggregates/%s" % str(aggregate_id),
+                              self.headers)
+        aggregate = self._format_aggregate(etree.fromstring(body))
+        return resp, aggregate
+
+    def create_aggregate(self, name, availability_zone=None):
+        """Creates a new aggregate."""
+        post_body = Element("aggregate",
+                            name=name,
+                            availability_zone=availability_zone)
+        resp, body = self.post('os-aggregates',
+                               str(Document(post_body)),
+                               self.headers)
+        aggregate = self._format_aggregate(etree.fromstring(body))
+        return resp, aggregate
+
+    def delete_aggregate(self, aggregate_id):
+        """Deletes the given aggregate."""
+        return self.delete("os-aggregates/%s" % str(aggregate_id),
+                           self.headers)
+
+    def is_resource_deleted(self, id):
+        try:
+            self.get_aggregate(id)
+        except exceptions.NotFound:
+            return True
+        return False
+
+    def add_host(self, aggregate_id, host):
+        """Adds a host to the given aggregate."""
+        post_body = Element("add_host", host=host)
+        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
+                               str(Document(post_body)),
+                               self.headers)
+        aggregate = self._format_aggregate(etree.fromstring(body))
+        return resp, aggregate
+
+    def remove_host(self, aggregate_id, host):
+        """Removes a host from the given aggregate."""
+        post_body = Element("remove_host", host=host)
+        resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
+                               str(Document(post_body)),
+                               self.headers)
+        aggregate = self._format_aggregate(etree.fromstring(body))
+        return resp, aggregate
diff --git a/tempest/test.py b/tempest/test.py
index 6af95f9..de255d5 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -60,6 +60,29 @@
             super(BaseTestCase, cls).setUpClass()
 
 
+def call_until_true(func, duration, sleep_for):
+    """
+    Call the given function until it returns True (and return True) or
+    until the specified duration (in seconds) elapses (and return
+    False).
+
+    :param func: A zero argument callable that returns True on success.
+    :param duration: The number of seconds for which to attempt a
+        successful call of the function.
+    :param sleep_for: The number of seconds to sleep after an unsuccessful
+                      invocation of the function.
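+
+    A usage sketch (hypothetical caller-supplied check)::
+
+        call_until_true(lambda: server_reachable(ip), 60, 5)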
+    """
+    now = time.time()
+    timeout = now + duration
+    while now < timeout:
+        if func():
+            return True
+        LOG.debug("Sleeping for %d seconds", sleep_for)
+        time.sleep(sleep_for)
+        now = time.time()
+    return False
+
+
 class TestCase(BaseTestCase):
     """Base test case class for all Tempest tests
 
@@ -94,57 +117,33 @@
         self.os_resources.remove(thing)
         del self.resource_keys[key]
 
-
-def call_until_true(func, duration, sleep_for):
-    """
-    Call the given function until it returns True (and return True) or
-    until the specified duration (in seconds) elapses (and return
-    False).
-
-    :param func: A zero argument callable that returns True on success.
-    :param duration: The number of seconds for which to attempt a successful
-                     call of the function.
-    :param sleep_for: The number of seconds to sleep after an unsuccessful
-                      invocation of the function.
-    """
-    now = time.time()
-    timeout = now + duration
-    while now < timeout:
-        if func():
-            return True
-        LOG.debug("Sleeping for %d seconds", sleep_for)
-        time.sleep(sleep_for)
-        now = time.time()
-    return False
-
-
-def status_timeout(testcase, things, thing_id, expected_status):
-    """
-    Given a thing and an expected status, do a loop, sleeping
-    for a configurable amount of time, checking for the
-    expected status to show. At any time, if the returned
-    status of the thing is ERROR, fail out.
-    """
-    def check_status():
-        # python-novaclient has resources available to its client
-        # that all implement a get() method taking an identifier
-        # for the singular resource to retrieve.
-        thing = things.get(thing_id)
-        new_status = thing.status
-        if new_status == 'ERROR':
-            testcase.fail("%s failed to get to expected status."
+    def status_timeout(self, things, thing_id, expected_status):
+        """
+        Given a thing and an expected status, do a loop, sleeping
+        for a configurable amount of time, checking for the
+        expected status to show. At any time, if the returned
+        status of the thing is ERROR, fail out.
+        """
+        def check_status():
+            # python-novaclient has resources available to its client
+            # that all implement a get() method taking an identifier
+            # for the singular resource to retrieve.
+            thing = things.get(thing_id)
+            new_status = thing.status
+            if new_status == 'ERROR':
+                self.fail("%s failed to get to expected status. "
                           "In ERROR state."
                           % thing)
-        elif new_status == expected_status:
-            return True  # All good.
-        LOG.debug("Waiting for %s to get to %s status. "
-                  "Currently in %s status",
-                  thing, expected_status, new_status)
-    conf = config.TempestConfig()
-    if not call_until_true(check_status,
-                           conf.compute.build_timeout,
-                           conf.compute.build_interval):
-        testcase.fail("Timed out waiting for thing %s to become %s"
+            elif new_status == expected_status:
+                return True  # All good.
+            LOG.debug("Waiting for %s to get to %s status. "
+                      "Currently in %s status",
+                      thing, expected_status, new_status)
+        conf = config.TempestConfig()
+        if not call_until_true(check_status,
+                               conf.compute.build_timeout,
+                               conf.compute.build_interval):
+            self.fail("Timed out waiting for thing %s to become %s"
                       % (thing_id, expected_status))
 
 
diff --git a/tempest/tests/boto/test_ec2_instance_run.py b/tempest/tests/boto/test_ec2_instance_run.py
index 08dc330..b6b93d8 100644
--- a/tempest/tests/boto/test_ec2_instance_run.py
+++ b/tempest/tests/boto/test_ec2_instance_run.py
@@ -202,14 +202,13 @@
 
         re_search_wait(_output, text)
         part_lines = ssh.get_partitions().split('\n')
-        # "attaching" invalid EC2 state ! #1074901
         volume.attach(instance.id, "/dev/vdh")
 
         def _volume_state():
             volume.update(validate=True)
             return volume.status
 
-        #self.assertVolumeStatusWait(_volume_state, "in-use")  # #1074901
+        self.assertVolumeStatusWait(_volume_state, "in-use")
         re_search_wait(_volume_state, "in-use")
 
         #NOTE(afazekas):  Different Hypervisor backends names
@@ -229,9 +228,9 @@
 
         #TODO(afazekas): Resource compare to the flavor settings
 
-        volume.detach()  # "detaching" invalid EC2 status #1074901
+        volume.detach()
 
-        #self.assertVolumeStatusWait(_volume_state, "available")
+        self.assertVolumeStatusWait(_volume_state, "available")
         re_search_wait(_volume_state, "available")
         LOG.info("Volume %s state: %s", volume.id, volume.status)
 
diff --git a/tempest/tests/boto/test_ec2_volumes.py b/tempest/tests/boto/test_ec2_volumes.py
index dc8ff31..37a913e 100644
--- a/tempest/tests/boto/test_ec2_volumes.py
+++ b/tempest/tests/boto/test_ec2_volumes.py
@@ -39,7 +39,6 @@
         cls.client = cls.os.ec2api_client
         cls.zone = cls.client.get_good_zone()
 
-#NOTE(afazekas): as admin it can trigger the Bug #1074901
     @attr(type='smoke')
     def test_create_get_delete(self):
         # EC2 Create, get, delete Volume
diff --git a/tempest/tests/compute/admin/test_aggregates.py b/tempest/tests/compute/admin/test_aggregates.py
index 06acc41..07df77f 100644
--- a/tempest/tests/compute/admin/test_aggregates.py
+++ b/tempest/tests/compute/admin/test_aggregates.py
@@ -27,13 +27,14 @@
     Tests Aggregates API that require admin privileges
     """
 
+    _host_key = 'OS-EXT-SRV-ATTR:host'
     _interface = 'json'
 
     @classmethod
     def setUpClass(cls):
         super(AggregatesAdminTestJSON, cls).setUpClass()
         cls.client = cls.os_adm.aggregates_client
-        cls.user_client = cls.os.aggregates_client
+        cls.user_client = cls.aggregates_client
         cls.aggregate_name_prefix = 'test_aggregate_'
         cls.az_name_prefix = 'test_az_'
 
@@ -212,7 +213,7 @@
                                           availability_zone=az_name)
         servers_client.wait_for_server_status(server['id'], 'ACTIVE')
         resp, body = admin_servers_client.get_server(server['id'])
-        self.assertEqual(self.host, body['OS-EXT-SRV-ATTR:host'])
+        self.assertEqual(self.host, body[self._host_key])
 
     @attr(type='negative')
     def test_aggregate_add_non_exist_host(self):
@@ -254,3 +255,9 @@
         self.assertRaises(exceptions.Unauthorized,
                           self.user_client.remove_host,
                           aggregate['id'], self.host)
+
+
+class AggregatesAdminTestXML(AggregatesAdminTestJSON):
+    _host_key = (
+        '{http://docs.openstack.org/compute/ext/extended_status/api/v1.1}host')
+    _interface = 'xml'
diff --git a/tempest/tests/compute/base.py b/tempest/tests/compute/base.py
index 221cfb6..b313e0b 100644
--- a/tempest/tests/compute/base.py
+++ b/tempest/tests/compute/base.py
@@ -62,6 +62,7 @@
         cls.interfaces_client = os.interfaces_client
         cls.fixed_ips_client = os.fixed_ips_client
         cls.availability_zone_client = os.availability_zone_client
+        cls.aggregates_client = os.aggregates_client
         cls.build_interval = cls.config.compute.build_interval
         cls.build_timeout = cls.config.compute.build_timeout
         cls.ssh_user = cls.config.compute.ssh_user
diff --git a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
index d800fb5..5fe911f 100644
--- a/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
+++ b/tempest/tests/compute/floating_ips/test_floating_ips_actions.py
@@ -53,8 +53,8 @@
     @classmethod
     def tearDownClass(cls):
         #Deleting the floating IP which is created in this method
-        super(FloatingIPsTestJSON, cls).tearDownClass()
         resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
+        super(FloatingIPsTestJSON, cls).tearDownClass()
 
     @attr(type='positive')
     def test_allocate_floating_ip(self):
diff --git a/tempest/tests/compute/images/test_images_whitebox.py b/tempest/tests/compute/images/test_images_whitebox.py
index 105a38a..9ec05dd 100644
--- a/tempest/tests/compute/images/test_images_whitebox.py
+++ b/tempest/tests/compute/images/test_images_whitebox.py
@@ -37,10 +37,10 @@
     @classmethod
     def tearDownClass(cls):
         """Delete images after a test is executed."""
-        super(ImagesWhiteboxTest, cls).tearDownClass()
         for image_id in cls.image_ids:
             cls.client.delete_image(image_id)
             cls.image_ids.remove(image_id)
+        super(ImagesWhiteboxTest, cls).tearDownClass()
 
     @classmethod
     def update_state(self, server_id, vm_state, task_state, deleted=0):
diff --git a/tempest/tests/compute/servers/test_server_advanced_ops.py b/tempest/tests/compute/servers/test_server_advanced_ops.py
index 8be9c54..ad859d0 100644
--- a/tempest/tests/compute/servers/test_server_advanced_ops.py
+++ b/tempest/tests/compute/servers/test_server_advanced_ops.py
@@ -66,18 +66,18 @@
 
         self.assertEqual(self.instance.status, 'BUILD')
         instance_id = self.get_resource('instance').id
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
         instance = self.get_resource('instance')
         instance_id = instance.id
         resize_flavor = self.config.compute.flavor_ref_alt
         LOG.debug("Resizing instance %s from flavor %s to flavor %s",
                   instance.id, instance.flavor, resize_flavor)
         instance.resize(resize_flavor)
-        test.status_timeout(self, self.compute_client.servers, instance_id,
+        self.status_timeout(self.compute_client.servers, instance_id,
                             'VERIFY_RESIZE')
 
         LOG.debug("Confirming resize of instance %s", instance_id)
         instance.confirm_resize()
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
diff --git a/tempest/tests/compute/servers/test_server_basic_ops.py b/tempest/tests/compute/servers/test_server_basic_ops.py
index e4e246a..fdbbd3c 100644
--- a/tempest/tests/compute/servers/test_server_basic_ops.py
+++ b/tempest/tests/compute/servers/test_server_basic_ops.py
@@ -101,8 +101,8 @@
 
     def wait_on_active(self):
         instance_id = self.get_resource('instance').id
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
 
     def pause_server(self):
         instance = self.get_resource('instance')
@@ -110,8 +110,8 @@
         LOG.debug("Pausing instance %s. Current status: %s",
                   instance_id, instance.status)
         instance.pause()
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'PAUSED')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'PAUSED')
 
     def unpause_server(self):
         instance = self.get_resource('instance')
@@ -119,8 +119,8 @@
         LOG.debug("Unpausing instance %s. Current status: %s",
                   instance_id, instance.status)
         instance.unpause()
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
 
     def suspend_server(self):
         instance = self.get_resource('instance')
@@ -128,7 +128,7 @@
         LOG.debug("Suspending instance %s. Current status: %s",
                   instance_id, instance.status)
         instance.suspend()
-        test.status_timeout(self, self.compute_client.servers,
+        self.status_timeout(self.compute_client.servers,
                             instance_id, 'SUSPENDED')
 
     def resume_server(self):
@@ -137,8 +137,8 @@
         LOG.debug("Resuming instance %s. Current status: %s",
                   instance_id, instance.status)
         instance.resume()
-        test.status_timeout(
-            self, self.compute_client.servers, instance_id, 'ACTIVE')
+        self.status_timeout(
+            self.compute_client.servers, instance_id, 'ACTIVE')
 
     def terminate_instance(self):
         instance = self.get_resource('instance')
diff --git a/tempest/tests/compute/servers/test_server_rescue.py b/tempest/tests/compute/servers/test_server_rescue.py
index 04c5b27..862a86a 100644
--- a/tempest/tests/compute/servers/test_server_rescue.py
+++ b/tempest/tests/compute/servers/test_server_rescue.py
@@ -85,7 +85,6 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(ServerRescueTestJSON, cls).tearDownClass()
         #Deleting the floating IP which is created in this method
         cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
         client = cls.volumes_extensions_client
@@ -93,6 +92,7 @@
         client.delete_volume(str(cls.volume_to_detach['id']).strip())
         resp, cls.sg = cls.security_groups_client.delete_security_group(
             cls.sg_id)
+        super(ServerRescueTestJSON, cls).tearDownClass()
 
     def tearDown(self):
         super(ServerRescueTestJSON, self).tearDown()
diff --git a/tempest/tests/network/common.py b/tempest/tests/network/common.py
index 6246f54..6811acf 100644
--- a/tempest/tests/network/common.py
+++ b/tempest/tests/network/common.py
@@ -269,7 +269,7 @@
             self.set_resource(name, server)
         except AttributeError:
             self.fail("Server not successfully created.")
-        test.status_timeout(self, client.servers, server.id, 'ACTIVE')
+        self.status_timeout(client.servers, server.id, 'ACTIVE')
         # The instance retrieved on creation is missing network
         # details, necessitating retrieval after it becomes active to
         # ensure correct details.
diff --git a/tempest/tests/volume/admin/test_multi_backend.py b/tempest/tests/volume/admin/test_multi_backend.py
index 04007c9..3d5fae4 100644
--- a/tempest/tests/volume/admin/test_multi_backend.py
+++ b/tempest/tests/volume/admin/test_multi_backend.py
@@ -97,8 +97,6 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(VolumeMultiBackendTest, cls).tearDownClass()
-
         ## volumes deletion
         for volume_id in cls.volume_id_list:
             cls.client.delete_volume(volume_id)
@@ -108,6 +106,8 @@
         for volume_type in cls.volume_type_list:
             cls.client2.delete_volume_type(volume_type)
 
+        super(VolumeMultiBackendTest, cls).tearDownClass()
+
     def test_multi_backend_enabled(self):
         # this test checks that multi backend is enabled for at least the
         # computes where the volumes created in setUp were made
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs.py b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
index c8cf8d9..1cd7653 100644
--- a/tempest/tests/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs.py
@@ -30,8 +30,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
         cls.client.delete_volume_type(cls.volume_type['id'])
+        super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
 
     def test_volume_type_extra_specs_list(self):
         # List Volume types extra specs.
diff --git a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
index 13fcbbf..bd6e279 100644
--- a/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/tests/volume/admin/test_volume_types_extra_specs_negative.py
@@ -36,8 +36,8 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(ExtraSpecsNegativeTest, cls).tearDownClass()
         cls.client.delete_volume_type(cls.volume_type['id'])
+        super(ExtraSpecsNegativeTest, cls).tearDownClass()
 
     def test_update_no_body(self):
         # Should not update volume type extra specs with no body
diff --git a/tempest/tests/volume/test_volumes_actions.py b/tempest/tests/volume/test_volumes_actions.py
index fb9b975..e6eb8d8 100644
--- a/tempest/tests/volume/test_volumes_actions.py
+++ b/tempest/tests/volume/test_volumes_actions.py
@@ -43,7 +43,6 @@
 
     @classmethod
     def tearDownClass(cls):
-        super(VolumesActionsTest, cls).tearDownClass()
         # Delete the test instance and volume
         cls.client.delete_volume(cls.volume['id'])
         cls.client.wait_for_resource_deletion(cls.volume['id'])
@@ -51,6 +50,8 @@
         cls.servers_client.delete_server(cls.server['id'])
         cls.client.wait_for_resource_deletion(cls.server['id'])
 
+        super(VolumesActionsTest, cls).tearDownClass()
+
     @attr(type='smoke')
     def test_attach_detach_volume_to_instance(self):
         # Volume is attached and detached successfully from an instance
diff --git a/tox.ini b/tox.ini
index 4a2f80e..565a9ad 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,17 +9,52 @@
          NOSE_OPENSTACK_YELLOW=3
          NOSE_OPENSTACK_SHOW_ELAPSED=1
          NOSE_OPENSTACK_STDOUT=1
-deps = -r{toxinidir}/tools/pip-requires
-       -r{toxinidir}/tools/test-requires
-commands = nosetests {posargs}
+
+[testenv:full]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+         NOSE_WITH_OPENSTACK=1
+         NOSE_OPENSTACK_COLOR=1
+         NOSE_OPENSTACK_RED=15
+         NOSE_OPENSTACK_YELLOW=3
+         NOSE_OPENSTACK_SHOW_ELAPSED=1
+         NOSE_OPENSTACK_STDOUT=1
+commands =
+  nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
+  nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+
+[testenv:smoke]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+         NOSE_WITH_OPENSTACK=1
+         NOSE_OPENSTACK_COLOR=1
+         NOSE_OPENSTACK_RED=15
+         NOSE_OPENSTACK_YELLOW=3
+         NOSE_OPENSTACK_SHOW_ELAPSED=1
+         NOSE_OPENSTACK_STDOUT=1
+commands =
+   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit -sv --attr=type=smoke --xunit-file=nosetests-smoke.xml tempest
+
 
 [testenv:coverage]
-commands = python -m tools/tempest_coverage -c start --combine
-           nosetests {posargs}
-           python -m tools/tempest_coverage -c report --html
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+         NOSE_WITH_OPENSTACK=1
+         NOSE_OPENSTACK_COLOR=1
+         NOSE_OPENSTACK_RED=15
+         NOSE_OPENSTACK_YELLOW=3
+         NOSE_OPENSTACK_SHOW_ELAPSED=1
+         NOSE_OPENSTACK_STDOUT=1
+commands =
+   python -m tools/tempest_coverage -c start --combine
+   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-full.xml -sv tempest
+   nosetests --logging-format '%(asctime)-15s %(message)s' --with-xunit --xunit-file=nosetests-cli.xml -sv cli
+   python -m tools/tempest_coverage -c report --html
 
 [testenv:pep8]
 commands = flake8
+deps = -r{toxinidir}/tools/pip-requires
+       -r{toxinidir}/tools/test-requires
 
 [flake8]
 ignore = E125,H302,H404