Merge "Move to the python-saharaclient"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 8ab3505..761a077 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -347,6 +347,13 @@
 # Does the test environment support resizing? (boolean value)
 #resize=false
 
+# Does the test environment support pausing? (boolean value)
+#pause=true
+
+# Does the test environment support suspend/resume? (boolean
+# value)
+#suspend=true
+
 # Does the test environment support live migration available?
 # (boolean value)
 #live_migration=false
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 5e011dd..7e34213 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -23,6 +23,8 @@
 
 
 class DeleteServersTestJSON(base.BaseV2ComputeTest):
+    pause_available = CONF.compute_feature_enabled.pause
+
     # NOTE: Server creations of each test class should be under 10
     # for preventing "Quota exceeded for instances"
 
@@ -57,6 +59,7 @@
         self.assertEqual('204', resp['status'])
         self.client.wait_for_server_termination(server['id'])
 
+    @testtools.skipIf(not pause_available, 'Pause is not available.')
     @test.attr(type='gate')
     def test_delete_server_while_in_pause_state(self):
         # Delete a server while its VM state is Paused
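
For reference, a minimal standalone sketch (not part of this patch) of the skip pattern used above. The feature flag has to be available at class-definition time, because testtools.skipIf() is evaluated while the class body executes rather than when the test runs; that is why pause_available is a plain class attribute:

    import testtools

    PAUSE_AVAILABLE = False  # stand-in for CONF.compute_feature_enabled.pause


    class ExampleTest(testtools.TestCase):
        pause_available = PAUSE_AVAILABLE

        @testtools.skipIf(not pause_available, 'Pause is not available.')
        def test_needs_pause(self):
            # Skipped entirely on deployments that do not support pause.
            self.assertTrue(True)
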
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 21465d8..26a75a2 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -31,6 +31,8 @@
 
 class ServerActionsTestJSON(base.BaseV2ComputeTest):
     resize_available = CONF.compute_feature_enabled.resize
+    pause_available = CONF.compute_feature_enabled.pause
+    suspend_available = CONF.compute_feature_enabled.suspend
     run_ssh = CONF.compute.run_ssh
 
     def setUp(self):
@@ -351,6 +353,7 @@
 
         self.wait_for(self._get_output)
 
+    @testtools.skipIf(not pause_available, 'Pause is not available.')
     @test.attr(type='gate')
     def test_pause_unpause_server(self):
         resp, server = self.client.pause_server(self.server_id)
@@ -360,6 +363,7 @@
         self.assertEqual(202, resp.status)
         self.client.wait_for_server_status(self.server_id, 'ACTIVE')
 
+    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
     @test.attr(type='gate')
     def test_suspend_resume_server(self):
         resp, server = self.client.suspend_server(self.server_id)
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index b7e4e38..ddfc1d5 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -60,6 +60,25 @@
         resp, server = self.create_test_server(personality=person)
         self.assertEqual('202', resp['status'])
 
+    @test.attr(type='gate')
+    def test_create_server_with_existent_personality_file(self):
+        # Any existing file that matches the specified path will be renamed
+        # to include a .bak extension with a timestamp appended
+
+        # TODO(zhikunliu): add validations once the ssh instance validation
+        # refactor is ready
+        file_contents = 'This is a test file.'
+        personality = [{'path': '/test.txt',
+                       'contents': base64.b64encode(file_contents)}]
+        resp, server = self.create_test_server(personality=personality,
+                                               wait_until="ACTIVE")
+        resp, image = self.create_image_from_server(server['id'],
+                                                    wait_until="ACTIVE")
+        resp, server = self.create_test_server(image_id=image['id'],
+                                               personality=personality,
+                                               wait_until="ACTIVE")
+        self.assertEqual('202', resp['status'])
+
 
 class ServerPersonalityTestXML(ServerPersonalityTestJSON):
     _interface = "xml"
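
Side note: the personality entries above follow the compute API convention of base64-encoding file contents before sending them (the test passes a plain str, which works on the Python 2 interpreter Tempest targets here). A quick standalone illustration of the round-trip:

    import base64

    contents = 'This is a test file.'
    encoded = base64.b64encode(contents.encode('utf-8'))
    # encoded == b'VGhpcyBpcyBhIHRlc3QgZmlsZS4='
    assert base64.b64decode(encoded).decode('utf-8') == contents
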
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 4cccbd6..cbfec5c 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -16,6 +16,8 @@
 import base64
 import sys
 
+import testtools
+
 from tempest.api.compute import base
 from tempest import clients
 from tempest.common.utils import data_utils
@@ -27,6 +29,8 @@
 
 
 class ServersNegativeTestJSON(base.BaseV2ComputeTest):
+    pause_available = CONF.compute_feature_enabled.pause
+    suspend_available = CONF.compute_feature_enabled.suspend
 
     def setUp(self):
         super(ServersNegativeTestJSON, self).setUp()
@@ -125,6 +129,7 @@
         self.assertRaises(exceptions.NotFound, self.client.reboot,
                           nonexistent_server, 'SOFT')
 
+    @testtools.skipIf(not pause_available, 'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_pause_paused_server(self):
         # Pause a paused server.
@@ -304,6 +309,7 @@
         self.assertRaises(exceptions.NotFound, self.servers_client.stop,
                           nonexistent_server)
 
+    @testtools.skipIf(not pause_available, 'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_pause_non_existent_server(self):
         # pause a non existent server
@@ -311,6 +317,7 @@
         self.assertRaises(exceptions.NotFound, self.client.pause_server,
                           nonexistent_server)
 
+    @testtools.skipIf(not pause_available, 'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_unpause_non_existent_server(self):
         # unpause a non existent server
@@ -318,6 +325,7 @@
         self.assertRaises(exceptions.NotFound, self.client.unpause_server,
                           nonexistent_server)
 
+    @testtools.skipIf(not pause_available, 'Pause is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_unpause_server_invalid_state(self):
         # unpause an active server.
@@ -325,6 +333,7 @@
                           self.client.unpause_server,
                           self.server_id)
 
+    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_suspend_non_existent_server(self):
         # suspend a non existent server
@@ -332,6 +341,7 @@
         self.assertRaises(exceptions.NotFound, self.client.suspend_server,
                           nonexistent_server)
 
+    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_suspend_server_invalid_state(self):
         # suspend a suspended server.
@@ -344,6 +354,7 @@
                           self.client.suspend_server,
                           self.server_id)
 
+    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_resume_non_existent_server(self):
         # resume a non existent server
@@ -351,6 +362,7 @@
         self.assertRaises(exceptions.NotFound, self.client.resume_server,
                           nonexistent_server)
 
+    @testtools.skipIf(not suspend_available, 'Suspend is not available.')
     @test.attr(type=['negative', 'gate'])
     def test_resume_server_invalid_state(self):
         # resume an active server.
diff --git a/tempest/api/network/test_quotas.py b/tempest/api/network/admin/test_quotas.py
similarity index 86%
rename from tempest/api/network/test_quotas.py
rename to tempest/api/network/admin/test_quotas.py
index 38784d8..a307986 100644
--- a/tempest/api/network/test_quotas.py
+++ b/tempest/api/network/admin/test_quotas.py
@@ -15,12 +15,11 @@
 
 
 from tempest.api.network import base
-from tempest import clients
 from tempest.common.utils import data_utils
 from tempest import test
 
 
-class QuotasTest(base.BaseNetworkTest):
+class QuotasTest(base.BaseAdminNetworkTest):
     _interface = 'json'
 
     """
@@ -32,13 +31,9 @@
         update quotas for a specified tenant
         reset quotas to default values for a specified tenant
 
-    v2.0 of the API is assumed. It is also assumed that the following
-    option is defined in the [service_available] section of etc/tempest.conf:
-
-        neutron as True
-
-    Finally, it is assumed that the per-tenant quota extension API is
-    configured in /etc/neutron/neutron.conf as follows:
+    v2.0 of the API is assumed.
+    It is also assumed that the per-tenant quota extension API is configured
+    in /etc/neutron/neutron.conf as follows:
 
         quota_driver = neutron.db.quota_db.DbQuotaDriver
     """
@@ -49,9 +44,7 @@
         if not test.is_extension_enabled('quotas', 'network'):
             msg = "quotas extension not enabled."
             raise cls.skipException(msg)
-        admin_manager = clients.AdminManager()
-        cls.admin_client = admin_manager.network_client
-        cls.identity_admin_client = admin_manager.identity_client
+        cls.identity_admin_client = cls.os_adm.identity_client
 
     @test.attr(type='gate')
     def test_quotas(self):
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 6178a1c..e79d23c 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -25,10 +25,10 @@
     _interface = "json"
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(VolumeMultiBackendTest, cls).setUpClass()
         if not CONF.volume_feature_enabled.multi_backend:
-            cls.tearDownClass()
             raise cls.skipException("Cinder multi-backend feature disabled")
 
         cls.backend1_name = CONF.volume.backend1_name
@@ -37,40 +37,36 @@
         cls.volume_client = cls.os_adm.volumes_client
         cls.volume_type_id_list = []
         cls.volume_id_list = []
-        try:
-            # Volume/Type creation (uses backend1_name)
-            type1_name = data_utils.rand_name('Type-')
-            vol1_name = data_utils.rand_name('Volume-')
-            extra_specs1 = {"volume_backend_name": cls.backend1_name}
-            resp, cls.type1 = cls.client.create_volume_type(
-                type1_name, extra_specs=extra_specs1)
-            cls.volume_type_id_list.append(cls.type1['id'])
 
-            resp, cls.volume1 = cls.volume_client.create_volume(
-                size=1, display_name=vol1_name, volume_type=type1_name)
-            cls.volume_id_list.append(cls.volume1['id'])
-            cls.volume_client.wait_for_volume_status(cls.volume1['id'],
+        # Volume/Type creation (uses backend1_name)
+        type1_name = data_utils.rand_name('Type-')
+        vol1_name = data_utils.rand_name('Volume-')
+        extra_specs1 = {"volume_backend_name": cls.backend1_name}
+        resp, cls.type1 = cls.client.create_volume_type(
+            type1_name, extra_specs=extra_specs1)
+        cls.volume_type_id_list.append(cls.type1['id'])
+
+        resp, cls.volume1 = cls.volume_client.create_volume(
+            size=1, display_name=vol1_name, volume_type=type1_name)
+        cls.volume_id_list.append(cls.volume1['id'])
+        cls.volume_client.wait_for_volume_status(cls.volume1['id'],
+                                                 'available')
+
+        if cls.backend1_name != cls.backend2_name:
+            # Volume/Type creation (uses backend2_name)
+            type2_name = data_utils.rand_name('Type-')
+            vol2_name = data_utils.rand_name('Volume-')
+            extra_specs2 = {"volume_backend_name": cls.backend2_name}
+            resp, cls.type2 = cls.client.create_volume_type(
+                type2_name, extra_specs=extra_specs2)
+            cls.volume_type_id_list.append(cls.type2['id'])
+
+            resp, cls.volume2 = cls.volume_client.create_volume(
+                size=1, display_name=vol2_name, volume_type=type2_name)
+            cls.volume_id_list.append(cls.volume2['id'])
+            cls.volume_client.wait_for_volume_status(cls.volume2['id'],
                                                      'available')
 
-            if cls.backend1_name != cls.backend2_name:
-                # Volume/Type creation (uses backend2_name)
-                type2_name = data_utils.rand_name('Type-')
-                vol2_name = data_utils.rand_name('Volume-')
-                extra_specs2 = {"volume_backend_name": cls.backend2_name}
-                resp, cls.type2 = cls.client.create_volume_type(
-                    type2_name, extra_specs=extra_specs2)
-                cls.volume_type_id_list.append(cls.type2['id'])
-
-                resp, cls.volume2 = cls.volume_client.create_volume(
-                    size=1, display_name=vol2_name, volume_type=type2_name)
-                cls.volume_id_list.append(cls.volume2['id'])
-                cls.volume_client.wait_for_volume_status(cls.volume2['id'],
-                                                         'available')
-        except Exception as e:
-            LOG.exception("setup failed: %s" % e)
-            cls.tearDownClass()
-            raise
-
     @classmethod
     def tearDownClass(cls):
         # volumes deletion
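
The try/except block removed above is replaced by the @test.safe_setup decorator added to setUpClass. The real implementation lives in tempest/test.py; the idea it captures is roughly the following illustrative sketch: if setUpClass fails part-way through, run tearDownClass so partially created resources are cleaned up, then re-raise.

    import functools


    def safe_setup(f):
        # Illustration only -- not the actual tempest.test.safe_setup code.
        @functools.wraps(f)
        def wrapper(cls):
            try:
                f(cls)
            except Exception:
                # Clean up whatever the failed setup managed to create.
                cls.tearDownClass()
                raise
        return wrapper

As in the hunks above, it sits directly beneath @classmethod, so the wrapped function still receives the class as its only argument.
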
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index cd6d7a8..f9fbe18 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -27,6 +27,7 @@
     _interface = "json"
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(VolumesBackupsTest, cls).setUpClass()
 
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index e94c700..0d57d47 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -23,16 +23,13 @@
     _interface = "json"
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(VolumeMetadataTest, cls).setUpClass()
         # Create a volume
         cls.volume = cls.create_volume()
         cls.volume_id = cls.volume['id']
 
-    @classmethod
-    def tearDownClass(cls):
-        super(VolumeMetadataTest, cls).tearDownClass()
-
     def tearDown(self):
         # Update the metadata to {}
         self.volumes_client.update_volume_metadata(self.volume_id, {})
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index c356342..e2f7a38 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -56,6 +56,7 @@
                              [str_vol(v) for v in fetched_list]))
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(VolumesListTest, cls).setUpClass()
         cls.client = cls.volumes_client
@@ -65,24 +66,10 @@
         cls.volume_id_list = []
         cls.metadata = {'Type': 'work'}
         for i in range(3):
-            try:
-                volume = cls.create_volume(metadata=cls.metadata)
-
-                resp, volume = cls.client.get_volume(volume['id'])
-                cls.volume_list.append(volume)
-                cls.volume_id_list.append(volume['id'])
-            except Exception:
-                LOG.exception('Failed to create volume. %d volumes were '
-                              'created' % len(cls.volume_id_list))
-                if cls.volume_list:
-                    # We could not create all the volumes, though we were able
-                    # to create *some* of the volumes. This is typically
-                    # because the backing file size of the volume group is
-                    # too small.
-                    for volid in cls.volume_id_list:
-                        cls.client.delete_volume(volid)
-                        cls.client.wait_for_resource_deletion(volid)
-                raise
+            volume = cls.create_volume(metadata=cls.metadata)
+            resp, volume = cls.client.get_volume(volume['id'])
+            cls.volume_list.append(volume)
+            cls.volume_id_list.append(volume['id'])
 
     @classmethod
     def tearDownClass(cls):
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 82924a5..a8b0a8d 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -25,6 +25,7 @@
     _interface = 'json'
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(VolumesNegativeTest, cls).setUpClass()
         cls.client = cls.volumes_client
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 84c9501..2ce3a4f 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -24,14 +24,10 @@
     _interface = "json"
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(VolumesSnapshotTest, cls).setUpClass()
-        try:
-            cls.volume_origin = cls.create_volume()
-        except Exception:
-            LOG.exception("setup failed")
-            cls.tearDownClass()
-            raise
+        cls.volume_origin = cls.create_volume()
 
     @classmethod
     def tearDownClass(cls):
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index 4d2573b..41445d7 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -56,6 +56,7 @@
                              [str_vol(v) for v in fetched_list]))
 
     @classmethod
+    @test.safe_setup
     def setUpClass(cls):
         super(VolumesV2ListTestJSON, cls).setUpClass()
         cls.client = cls.volumes_client
@@ -65,23 +66,10 @@
         cls.volume_id_list = []
         cls.metadata = {'Type': 'work'}
         for i in range(3):
-            try:
-                volume = cls.create_volume(metadata=cls.metadata)
-                resp, volume = cls.client.get_volume(volume['id'])
-                cls.volume_list.append(volume)
-                cls.volume_id_list.append(volume['id'])
-            except Exception:
-                LOG.exception('Failed to create volume. %d volumes were '
-                              'created' % len(cls.volume_id_list))
-                if cls.volume_list:
-                    # We could not create all the volumes, though we were able
-                    # to create *some* of the volumes. This is typically
-                    # because the backing file size of the volume group is
-                    # too small.
-                    for volid in cls.volume_id_list:
-                        cls.client.delete_volume(volid)
-                        cls.client.wait_for_resource_deletion(volid)
-                raise
+            volume = cls.create_volume(metadata=cls.metadata)
+            resp, volume = cls.client.get_volume(volume['id'])
+            cls.volume_list.append(volume)
+            cls.volume_id_list.append(volume['id'])
 
     @classmethod
     def tearDownClass(cls):
diff --git a/tempest/api_schema/compute/v2/floating_ips.py b/tempest/api_schema/compute/v2/floating_ips.py
index 61582ec..648d0bf 100644
--- a/tempest/api_schema/compute/v2/floating_ips.py
+++ b/tempest/api_schema/compute/v2/floating_ips.py
@@ -44,3 +44,33 @@
         'required': ['floating_ips']
     }
 }
+
+floating_ip = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'floating_ip': {
+                'type': 'object',
+                'properties': {
+                    # NOTE: Now the type of 'id' is integer, but here allows
+                    # 'string' also because we will be able to change it to
+                    # 'uuid' in the future.
+                    'id': {'type': ['integer', 'string']},
+                    'pool': {'type': ['string', 'null']},
+                    'instance_id': {'type': ['integer', 'string', 'null']},
+                    'ip': {
+                        'type': 'string',
+                        'format': 'ip-address'
+                    },
+                    'fixed_ip': {
+                        'type': ['string', 'null'],
+                        'format': 'ip-address'
+                    }
+                },
+                'required': ['id', 'pool', 'instance_id', 'ip', 'fixed_ip']
+            }
+        },
+        'required': ['floating_ip']
+    }
+}
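
For context, validate_response() in the REST client (used by the floating_ips_client changes later in this patch) checks the response status against 'status_code' and then runs the decoded JSON body through jsonschema. A standalone illustration against the schema added above; note that format keywords such as 'ip-address' are only enforced when a suitable FormatChecker is supplied:

    import jsonschema

    from tempest.api_schema.compute.v2.floating_ips import floating_ip

    body = {
        'floating_ip': {
            'id': 1,
            'pool': 'public',
            'instance_id': None,
            'ip': '172.24.4.1',
            'fixed_ip': None,
        }
    }

    assert 200 in floating_ip['status_code']
    jsonschema.validate(body, floating_ip['response_body'])  # passes silently
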
diff --git a/tempest/api_schema/compute/v2/images.py b/tempest/api_schema/compute/v2/images.py
index fb4804d..41b8fff 100644
--- a/tempest/api_schema/compute/v2/images.py
+++ b/tempest/api_schema/compute/v2/images.py
@@ -79,3 +79,38 @@
         'required': ['image']
     }
 }
+
+list_images = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'images': {
+                'type': 'array',
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'id': {'type': 'string'},
+                        'links': {
+                            'type': 'array',
+                            'items': {
+                                'type': 'object',
+                                'properties': {
+                                    'href': {
+                                        'type': 'string',
+                                        'format': 'uri'
+                                    },
+                                    'rel': {'type': 'string'}
+                                },
+                                'required': ['href', 'rel']
+                            }
+                        },
+                        'name': {'type': 'string'}
+                    },
+                    'required': ['id', 'links', 'name']
+                }
+            }
+        },
+        'required': ['images']
+    }
+}
diff --git a/tempest/config.py b/tempest/config.py
index 471a0de..b0945bb 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -249,6 +249,12 @@
     cfg.BoolOpt('resize',
                 default=False,
                 help="Does the test environment support resizing?"),
+    cfg.BoolOpt('pause',
+                default=True,
+                help="Does the test environment support pausing?"),
+    cfg.BoolOpt('suspend',
+                default=True,
+                help="Does the test environment support suspend/resume?"),
     cfg.BoolOpt('live_migration',
                 default=False,
                 help="Does the test environment support live migration "
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index ce2c66f..f7a3d6f 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -215,6 +215,7 @@
         self.assertEqual(5, resp.count("server1\n"))
         self.assertEqual(5, resp.count("server2\n"))
 
+    @test.skip_because(bug='1295165')
     @test.attr(type='smoke')
     @test.services('compute', 'network')
     def test_load_balancer_basic(self):
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
new file mode 100644
index 0000000..e7e97b5
--- /dev/null
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -0,0 +1,193 @@
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import debug
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest.test import services
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
+
+    """
+    This test case checks VM connectivity after a number of advanced
+    instance operations have been executed:
+
+     * Stop/Start an instance
+     * Reboot an instance
+     * Rebuild an instance
+     * Pause/Unpause an instance
+     * Suspend/Resume an instance
+     * Resize an instance
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        super(TestNetworkAdvancedServerOps, cls).setUpClass()
+        cls.check_preconditions()
+        if not (CONF.network.tenant_networks_reachable
+                or CONF.network.public_network_id):
+            msg = ('Either tenant_networks_reachable must be "true", or '
+                   'public_network_id must be defined.')
+            cls.enabled = False
+            raise cls.skipException(msg)
+
+    def cleanup_wrapper(self, resource):
+        self.cleanup_resource(resource, self.__class__.__name__)
+
+    def setUp(self):
+        super(TestNetworkAdvancedServerOps, self).setUp()
+        key_name = data_utils.rand_name('keypair-smoke-')
+        self.keypair = self.create_keypair(name=key_name)
+        self.addCleanup(self.cleanup_wrapper, self.keypair)
+        security_group =\
+            self._create_security_group_neutron(tenant_id=self.tenant_id)
+        self.addCleanup(self.cleanup_wrapper, security_group)
+        network = self._create_network(self.tenant_id)
+        self.addCleanup(self.cleanup_wrapper, network)
+        router = self._get_router(self.tenant_id)
+        self.addCleanup(self.cleanup_wrapper, router)
+        subnet = self._create_subnet(network)
+        self.addCleanup(self.cleanup_wrapper, subnet)
+        subnet.add_to_router(router.id)
+        public_network_id = CONF.network.public_network_id
+        create_kwargs = {
+            'nics': [
+                {'net-id': network.id},
+            ],
+            'key_name': self.keypair.name,
+            'security_groups': [security_group.name],
+        }
+        server_name = data_utils.rand_name('server-smoke-')
+        self.server = self.create_server(name=server_name,
+                                         create_kwargs=create_kwargs)
+        self.addCleanup(self.cleanup_wrapper, self.server)
+        self.floating_ip = self._create_floating_ip(self.server,
+                                                    public_network_id)
+        self.addCleanup(self.cleanup_wrapper, self.floating_ip)
+
+    def _check_tenant_network_connectivity(self, server,
+                                           username,
+                                           private_key,
+                                           should_connect=True):
+        if not CONF.network.tenant_networks_reachable:
+            msg = 'Tenant networks not configured to be reachable.'
+            LOG.info(msg)
+            return
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        try:
+            for net_name, ip_addresses in server.networks.iteritems():
+                for ip_address in ip_addresses:
+                    self._check_vm_connectivity(ip_address,
+                                                username,
+                                                private_key,
+                                                should_connect=should_connect)
+        except Exception:
+            LOG.exception('Tenant network connectivity check failed')
+            self._log_console_output(servers=[server])
+            debug.log_ip_ns()
+            raise
+
+    def _check_public_network_connectivity(self, floating_ip,
+                                           username,
+                                           private_key,
+                                           should_connect=True):
+        # The target login is assumed to have been configured for
+        # key-based authentication by cloud-init.
+        try:
+            self._check_vm_connectivity(floating_ip, username, private_key,
+                                        should_connect=should_connect)
+        except Exception:
+            LOG.exception("Public network connectivity check failed")
+            debug.log_ip_ns()
+            raise
+
+    def _check_network_connectivity(self, should_connect=True):
+        username = CONF.compute.image_ssh_user
+        private_key = self.keypair.private_key
+        self._check_tenant_network_connectivity(self.server,
+                                                username,
+                                                private_key,
+                                                should_connect=should_connect)
+        floating_ip = self.floating_ip.floating_ip_address
+        self._check_public_network_connectivity(floating_ip,
+                                                username,
+                                                private_key,
+                                                should_connect=should_connect)
+
+    def _wait_server_status_and_check_network_connectivity(self):
+        self.status_timeout(self.compute_client.servers, self.server.id,
+                            'ACTIVE')
+        self._check_network_connectivity()
+
+    @services('compute', 'network')
+    def test_server_connectivity_stop_start(self):
+        self.server.stop()
+        self.status_timeout(self.compute_client.servers, self.server.id,
+                            'SHUTOFF')
+        self._check_network_connectivity(should_connect=False)
+        self.server.start()
+        self._wait_server_status_and_check_network_connectivity()
+
+    @services('compute', 'network')
+    def test_server_connectivity_reboot(self):
+        self.server.reboot()
+        self._wait_server_status_and_check_network_connectivity()
+
+    @services('compute', 'network')
+    def test_server_connectivity_rebuild(self):
+        image_ref_alt = CONF.compute.image_ref_alt
+        self.server.rebuild(image_ref_alt)
+        self._wait_server_status_and_check_network_connectivity()
+
+    @services('compute', 'network')
+    def test_server_connectivity_pause_unpause(self):
+        self.server.pause()
+        self.status_timeout(self.compute_client.servers, self.server.id,
+                            'PAUSED')
+        self._check_network_connectivity(should_connect=False)
+        self.server.unpause()
+        self._wait_server_status_and_check_network_connectivity()
+
+    @services('compute', 'network')
+    def test_server_connectivity_suspend_resume(self):
+        self.server.suspend()
+        self.status_timeout(self.compute_client.servers, self.server.id,
+                            'SUSPENDED')
+        self._check_network_connectivity(should_connect=False)
+        self.server.resume()
+        self._wait_server_status_and_check_network_connectivity()
+
+    @services('compute', 'network')
+    def test_server_connectivity_resize(self):
+        if not CONF.compute_feature_enabled.resize:
+            msg = "Skipping test - resize not available on this host"
+            raise self.skipException(msg)
+        resize_flavor = CONF.compute.flavor_ref_alt
+        if resize_flavor == CONF.compute.flavor_ref:
+            msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
+            raise self.skipException(msg)
+        self.server.resize(resize_flavor)
+        self.status_timeout(self.compute_client.servers, self.server.id,
+                            'VERIFY_RESIZE')
+        self.server.confirm_resize()
+        self._wait_server_status_and_check_network_connectivity()
diff --git a/tempest/services/compute/json/floating_ips_client.py b/tempest/services/compute/json/floating_ips_client.py
index 2a7e25a..273ada6 100644
--- a/tempest/services/compute/json/floating_ips_client.py
+++ b/tempest/services/compute/json/floating_ips_client.py
@@ -47,6 +47,7 @@
         body = json.loads(body)
         if resp.status == 404:
             raise exceptions.NotFound(body)
+        self.validate_response(schema.floating_ip, resp, body)
         return resp, body['floating_ip']
 
     def create_floating_ip(self, pool_name=None):
@@ -56,6 +57,7 @@
         post_body = json.dumps(post_body)
         resp, body = self.post(url, post_body)
         body = json.loads(body)
+        self.validate_response(schema.floating_ip, resp, body)
         return resp, body['floating_ip']
 
     def delete_floating_ip(self, floating_ip_id):
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index deb9c93..2f128f2 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -58,6 +58,7 @@
 
         resp, body = self.get(url)
         body = json.loads(body)
+        self.validate_response(schema.list_images, resp, body)
         return resp, body['images']
 
     def list_images_with_detail(self, params=None):
diff --git a/tools/verify_tempest_config.py b/tools/verify_tempest_config.py
index 79e1fe3..aa92c0b 100755
--- a/tools/verify_tempest_config.py
+++ b/tools/verify_tempest_config.py
@@ -138,21 +138,63 @@
                           "enabled extensions" % (service, extension))
 
 
-def check_service_availability(service):
-    if service == 'nova_v3':
-        service = 'nova'
-    return getattr(CONF.service_available, service)
+def check_service_availability(os):
+    services = []
+    avail_services = []
+    codename_match = {
+        'volume': 'cinder',
+        'network': 'neutron',
+        'image': 'glance',
+        'object_storage': 'swift',
+        'compute': 'nova',
+        'orchestration': 'heat',
+        'metering': 'ceilometer',
+        'telemetry': 'ceilometer',
+        'data_processing': 'savanna',
+        'baremetal': 'ironic',
+        'identity': 'keystone',
+    }
+    # Get catalog list for endpoints to use for validation
+    __, endpoints = os.endpoints_client.list_endpoints()
+    for endpoint in endpoints:
+        __, service = os.service_client.get_service(endpoint['service_id'])
+        services.append(service['type'])
+    # Pull all catalog types from config file and compare against endpoint list
+    for cfgname in dir(CONF._config):
+        cfg = getattr(CONF, cfgname)
+        catalog_type = getattr(cfg, 'catalog_type', None)
+        if not catalog_type:
+            continue
+        else:
+            if cfgname == 'identity':
+                # Keystone is a required service for tempest
+                continue
+            if catalog_type not in services:
+                if getattr(CONF.service_available, codename_match[cfgname]):
+                    print('Endpoint type %s not found; either disable service '
+                          '%s or fix the catalog_type in the config file' % (
+                          catalog_type, codename_match[cfgname]))
+            else:
+                if not getattr(CONF.service_available,
+                               codename_match[cfgname]):
+                    print('Endpoint type %s is available, so service %s should'
+                          ' be set as available in the config file.' % (
+                          catalog_type, codename_match[cfgname]))
+                else:
+                    avail_services.append(codename_match[cfgname])
+    return avail_services
 
 
 def main(argv):
     print('Running config verification...')
     os = clients.ComputeAdminManager(interface='json')
+    services = check_service_availability(os)
     results = {}
     for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
-        # TODO(mtreinish) make this a keystone endpoint check for available
-        # services
-        if not check_service_availability(service):
-            print("%s is not available" % service)
+        if service == 'nova_v3' and 'nova' not in services:
+            continue
+        elif service not in services:
             continue
         results = verify_extensions(os, service, results)
     verify_glance_api_versions(os)