Merge "Decorate volume.base functions - fix cleanup"
diff --git a/bindep.txt b/bindep.txt
index efd3a10..7d34939 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -5,7 +5,6 @@
 libffi-devel [platform:rpm]
 gcc [platform:rpm]
 gcc [platform:dpkg]
-python-dev [platform:dpkg]
 python-devel [platform:rpm]
 python3-dev [platform:dpkg]
 python3-devel [platform:rpm]
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 2eaf72f..315255d 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -207,21 +207,21 @@
 is ``test_path=./tempest/test_discover`` which will only run test discover on the
 Tempest suite.
 
-Alternatively, there are the py27 and py36 tox jobs which will run the unit
-tests with the corresponding version of python.
+Alternatively, there is the py39 tox job which will run the unit tests with
+the corresponding version of python.
 
 One common activity is to just run a single test, you can do this with tox
-simply by specifying to just run py27 or py36 tests against a single test::
+simply by specifying to just run py39 tests against a single test::
 
-    $ tox -e py36 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
+    $ tox -e py39 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
 
 Or all tests in the test_microversions.py file::
 
-    $ tox -e py36 -- -n tempest.tests.test_microversions
+    $ tox -e py39 -- -n tempest.tests.test_microversions
 
 You may also use regular expressions to run any matching tests::
 
-    $ tox -e py36 -- test_microversions
+    $ tox -e py39 -- test_microversions
 
 Additionally, when running a single test, or test-file, the ``-n/--no-discover``
 argument is no longer required, however it may perform faster if included.
diff --git a/doc/source/plugins/plugin.rst b/doc/source/plugins/plugin.rst
index b1fd6f8..0771318 100644
--- a/doc/source/plugins/plugin.rst
+++ b/doc/source/plugins/plugin.rst
@@ -345,6 +345,8 @@
 plugin package on your system and then running Tempest inside a venv will not
 work.
 
-Tempest also exposes a tox job, all-plugin, which will setup a tox virtualenv
-with system site-packages enabled. This will let you leverage tox without
-requiring to manually install plugins in the tox venv before running tests.
+For example, you can use tox to install and run tests from a tempest plugin like
+this::
+
+    [~/tempest] $ tox -e venv-tempest -- pip install (path to the plugin directory)
+    [~/tempest] $ tox -e all
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 4ca7f0d..3d221c9 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,9 +9,10 @@
 
 Tempest master supports the below OpenStack Releases:
 
-* Victoria
-* Ussuri
-* Train
+* Zed
+* Yoga
+* Xena
+* Wallaby
 
 For older OpenStack Release:
 
@@ -32,6 +33,5 @@
 
 Tempest master supports the below python versions:
 
-* Python 3.6
-* Python 3.7
 * Python 3.8
+* Python 3.9
diff --git a/releasenotes/notes/Switch-to-ecdsa-ssh-key-type-by-default-0425b5d5ec72c1c3.yaml b/releasenotes/notes/Switch-to-ecdsa-ssh-key-type-by-default-0425b5d5ec72c1c3.yaml
new file mode 100644
index 0000000..5fc316b
--- /dev/null
+++ b/releasenotes/notes/Switch-to-ecdsa-ssh-key-type-by-default-0425b5d5ec72c1c3.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+  - |
+    As the version of cirros used in OpenStack CI does not support SHA-2
+    signatures for ssh, any connection from a FIPS enabled machine will fail
+    in case validation.ssh_key_type is set to rsa (the default until now).
+    Using ecdsa keys helps us avoid the mentioned issue.
+    From now on, the validation.ssh_key_type option will be set to ecdsa
+    by default for testing simplicity.
+    This change shouldn't have any drastic effect on any tempest consumer,
+    in case rsa ssh type is required in a consumer's scenario,
+    validation.ssh_key_type can be overridden to rsa.
diff --git a/releasenotes/notes/add-image-cache-apis-as-tempest-clients-fbcd186927a85e2f.yaml b/releasenotes/notes/add-image-cache-apis-as-tempest-clients-fbcd186927a85e2f.yaml
new file mode 100644
index 0000000..38cc9ac
--- /dev/null
+++ b/releasenotes/notes/add-image-cache-apis-as-tempest-clients-fbcd186927a85e2f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    A new ``image_cache`` tempest client for the glance v2 image
+    caching API is implemented in this release.
+
diff --git a/releasenotes/notes/add-server-external-events-client-c86b269b0091077b.yaml b/releasenotes/notes/add-server-external-events-client-c86b269b0091077b.yaml
new file mode 100644
index 0000000..2af8e95
--- /dev/null
+++ b/releasenotes/notes/add-server-external-events-client-c86b269b0091077b.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    The ``server_external_events`` tempest client for compute
+    Server External Events API is implemented in this release.
diff --git a/releasenotes/notes/drop-py-3-6-and-3-7-a34f2294f5341539.yaml b/releasenotes/notes/drop-py-3-6-and-3-7-a34f2294f5341539.yaml
new file mode 100644
index 0000000..ec4e2f2
--- /dev/null
+++ b/releasenotes/notes/drop-py-3-6-and-3-7-a34f2294f5341539.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    Python 3.6 and 3.7 support has been dropped. Last release of Tempest
+    to support python 3.6 and 3.7 is Tempest 30.0.0. The minimum version
+    of Python now supported by Tempest is Python 3.8.
diff --git a/releasenotes/notes/end-of-support-of-victoria-9c33f2b089b14cb5.yaml b/releasenotes/notes/end-of-support-of-victoria-9c33f2b089b14cb5.yaml
new file mode 100644
index 0000000..c644e3a
--- /dev/null
+++ b/releasenotes/notes/end-of-support-of-victoria-9c33f2b089b14cb5.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the Zed development cycle to
+    mark the end of support for EM Victoria release in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * Yoga
+    * Xena
+    * Wallaby
+
+    Current development of Tempest is for OpenStack Zed development
+    cycle.
diff --git a/releasenotes/notes/end-of-support-of-wallaby-455e4871ae4cb32e.yaml b/releasenotes/notes/end-of-support-of-wallaby-455e4871ae4cb32e.yaml
new file mode 100644
index 0000000..d5c2974
--- /dev/null
+++ b/releasenotes/notes/end-of-support-of-wallaby-455e4871ae4cb32e.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the 2023.1 development cycle to
+    mark the end of support for EM Wallaby release in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * Zed
+    * Yoga
+    * Xena
+
+    Current development of Tempest is for OpenStack 2023.1 development
+    cycle.
diff --git a/releasenotes/notes/enforce_scope_placement-47a12c741e330f60.yaml b/releasenotes/notes/enforce_scope_placement-47a12c741e330f60.yaml
new file mode 100644
index 0000000..e5e602e
--- /dev/null
+++ b/releasenotes/notes/enforce_scope_placement-47a12c741e330f60.yaml
@@ -0,0 +1,4 @@
+---
+prelude: >
+    Adding placement service for config options ``enforce_scope`` so that
+    we can switch the scope and new defaults enforcement for placement service.
diff --git a/releasenotes/notes/image_multiple_locations-cda4453567953c1d.yaml b/releasenotes/notes/image_multiple_locations-cda4453567953c1d.yaml
new file mode 100644
index 0000000..c8a026e
--- /dev/null
+++ b/releasenotes/notes/image_multiple_locations-cda4453567953c1d.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Add a new config option
+    `[image_feature_enabled]/manage_locations` which enables
+    tests for the `show_multiple_locations=True` functionality in
+    glance. In order for this to work you must also have a store
+    capable of hosting images with an HTTP URI.
diff --git a/releasenotes/notes/measure-downtime-during-live-migration-5e8305be270de680.yaml b/releasenotes/notes/measure-downtime-during-live-migration-5e8305be270de680.yaml
new file mode 100644
index 0000000..9f4abd1
--- /dev/null
+++ b/releasenotes/notes/measure-downtime-during-live-migration-5e8305be270de680.yaml
@@ -0,0 +1,9 @@
+---
+features:
+  - |
+    Added new module net_downtime including the fixture NetDowntimeMeter that
+    can be used to measure how long the connectivity with an IP is lost
+    during certain operations like a server live migration.
+    The configuration option allowed_network_downtime has been added with a
+    default value of 5.0 seconds, which would be the maximum time that
+    the connectivity downtime is expected to last.
diff --git a/releasenotes/notes/temp_url_tests_digest_config-3d8c9bb271961ddd.yaml b/releasenotes/notes/temp_url_tests_digest_config-3d8c9bb271961ddd.yaml
new file mode 100644
index 0000000..f96c030
--- /dev/null
+++ b/releasenotes/notes/temp_url_tests_digest_config-3d8c9bb271961ddd.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    Add configuration parameter `tempurl_digest_hashlib` into
+    `object-storage-feature-enabled` which configures the hashing algorithm to
+    use for the temp_url tests; defaults to 'sha256'.
+security:
+  - |
+    Swift used to support only 'sha1' for temp_url hashing but from many
+    years now 'sha256' and 'sha512' are also available. These are stronger
+    than 'sha1' and tempest now allows configuring which one to use.
diff --git a/releasenotes/notes/tempest-zed-release-335293c4a7f5a4b1.yaml b/releasenotes/notes/tempest-zed-release-335293c4a7f5a4b1.yaml
new file mode 100644
index 0000000..841aa5d
--- /dev/null
+++ b/releasenotes/notes/tempest-zed-release-335293c4a7f5a4b1.yaml
@@ -0,0 +1,17 @@
+---
+prelude: |
+    This release is to tag Tempest for OpenStack Zed release.
+    This release marks the start of Zed release support in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * Zed
+    * Yoga
+    * Xena
+    * Wallaby
+
+    Current development of Tempest is for OpenStack 2023.1 development
+    cycle. Every Tempest commit is also tested against master during
+    the 2023.1 cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a 2023.1 (or future release)
+    cloud.
+    To be on safe side, use this tag to test the OpenStack Zed release.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 122f7c7..ccd5fe1 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,10 @@
    :maxdepth: 1
 
    unreleased
+   v33.0.0
+   v32.0.0
+   v31.1.0
+   v31.0.0
    v30.0.0
    v29.2.0
    v29.1.0
diff --git a/releasenotes/source/v31.0.0.rst b/releasenotes/source/v31.0.0.rst
new file mode 100644
index 0000000..8fb797c
--- /dev/null
+++ b/releasenotes/source/v31.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v31.0.0 Release Notes
+=====================
+.. release-notes:: 31.0.0 Release Notes
+   :version: 31.0.0
diff --git a/releasenotes/source/v31.1.0.rst b/releasenotes/source/v31.1.0.rst
new file mode 100644
index 0000000..ecd7c36
--- /dev/null
+++ b/releasenotes/source/v31.1.0.rst
@@ -0,0 +1,5 @@
+=====================
+v31.1.0 Release Notes
+=====================
+.. release-notes:: 31.1.0 Release Notes
+   :version: 31.1.0
diff --git a/releasenotes/source/v32.0.0.rst b/releasenotes/source/v32.0.0.rst
new file mode 100644
index 0000000..e4c2cea
--- /dev/null
+++ b/releasenotes/source/v32.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v32.0.0 Release Notes
+=====================
+.. release-notes:: 32.0.0 Release Notes
+   :version: 32.0.0
diff --git a/releasenotes/source/v33.0.0.rst b/releasenotes/source/v33.0.0.rst
new file mode 100644
index 0000000..fe7bd7d
--- /dev/null
+++ b/releasenotes/source/v33.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v33.0.0 Release Notes
+=====================
+.. release-notes:: 33.0.0 Release Notes
+   :version: 33.0.0
diff --git a/requirements.txt b/requirements.txt
index c4c7fcc..a118856 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,3 +21,4 @@
 PrettyTable>=0.7.1 # BSD
 urllib3>=1.21.1 # MIT
 debtcollector>=1.2.0 # Apache-2.0
+defusedxml>=0.7.1 # PSFL
diff --git a/roles/run-tempest-26/README.rst b/roles/run-tempest-26/README.rst
index 3643edb..8ff1656 100644
--- a/roles/run-tempest-26/README.rst
+++ b/roles/run-tempest-26/README.rst
@@ -21,7 +21,7 @@
    A regular expression used to select the tests.
 
    It works only when used with some specific tox environments
-   ('all', 'all-plugin'.)
+   ('all', 'all-site-packages')
 
    In the following example only api scenario and third party tests
    will be executed.
@@ -47,7 +47,7 @@
    A regular expression used to skip the tests.
 
    It works only when used with some specific tox environments
-   ('all', 'all-plugin'.)
+   ('all', 'all-site-packages').
 
        ::
            vars:
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 1919393..04db849 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -21,7 +21,7 @@
    A regular expression used to select the tests.
 
    It works only when used with some specific tox environments
-   ('all', 'all-plugin'.)
+   ('all', 'all-site-packages').
 
    In the following example only api scenario and third party tests
    will be executed.
@@ -56,7 +56,7 @@
    A regular expression used to skip the tests.
 
    It works only when used with some specific tox environments
-   ('all', 'all-plugin'.)
+   ('all', 'all-site-packages').
 
        ::
            vars:
@@ -81,7 +81,7 @@
 .. zuul:rolevar:: stable_constraints_file
    :default: ''
 
-   Upper constraints file to be used for stable branch till stable/train.
+   Upper constraints file to be used for stable branch till stable/victoria.
 
 .. zuul:rolevar:: tempest_tox_environment
    :default: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 397de1e..f302fa5 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,11 +25,11 @@
     target_branch: "{{ zuul.override_checkout }}"
   when: zuul.override_checkout is defined
 
-- name: Use stable branch upper-constraints till stable/train
+- name: Use stable branch upper-constraints till stable/victoria
   set_fact:
     # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
     tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
-  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train"]
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train", "stable/ussuri", "stable/victoria"]
 
 - name: Use Configured upper-constraints for non-master Tempest
   set_fact:
@@ -78,16 +78,16 @@
         exclude_list_option: "--exclude-list={{ tempest_test_exclude_list|quote }}"
       when: exclude_list_stat.stat.exists
 
-- name: stable/train workaround to fallback exclude-list to blacklist
-  # NOTE(gmann): stable/train use Tempest 26.1.0 and with stestr 2.5.1
-  # (beacause of upper constraints of stestr 2.5.1 in stable/train) which
-  # does not have new args exclude-list so let's fallback to old arg
-  # if new arg is passed.
+- name: Tempest 26.1.0 workaround to fallback exclude-list to blacklist
+  # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with
+  # stestr 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1
+  # in stable/train|ussuri|victoria) which does not have new args exclude-list
+  # so let's fallback to old arg if new arg is passed.
   set_fact:
     exclude_list_option: "--blacklist-file={{ tempest_test_exclude_list|quote }}"
   when:
     - tempest_test_exclude_list is defined
-    - target_branch == "stable/train"
+    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
 
 # TODO(kopecmartin) remove this after all consumers of the role have switched
 # to tempest_exclude_regex option, until then it's kept here for the backward
@@ -105,19 +105,19 @@
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
-    - target_branch != "stable/train"
+    - target_branch not in ["stable/train", "stable/ussuri", "stable/victoria"]
 
-- name: stable/train workaround to fallback exclude-regex to black-regex
-  # NOTE(gmann): stable/train use Tempest 26.1.0 and with stestr 2.5.1
-  # (beacause of upper constraints of stestr 2.5.1 in stable/train) which
-  # does not have new args exclude-regex so let's fallback to old arg
-  # if new arg is passed.
+- name: Tempest 26.1.0 workaround to fallback exclude-regex to black-regex
+  # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with stestr
+  # 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1 in
+  # stable/train|ussuri|victoria) which does not have new args exclude-regex so
+  # let's fallback to old arg if new arg is passed.
   set_fact:
     tempest_test_exclude_regex: "--black-regex={{tempest_exclude_regex|quote}}"
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
-    - target_branch == "stable/train"
+    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
 
 - name: Run Tempest
   command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} \
diff --git a/setup.cfg b/setup.cfg
index a41eccf..beaf9b4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,7 +6,7 @@
 author = OpenStack
 author_email = openstack-discuss@lists.openstack.org
 home_page = https://docs.openstack.org/tempest/latest/
-python_requires = >=3.6
+python_requires = >=3.8
 classifier =
     Intended Audience :: Information Technology
     Intended Audience :: System Administrators
@@ -15,10 +15,9 @@
     Operating System :: POSIX :: Linux
     Programming Language :: Python
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.6
-    Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
+    Programming Language :: Python :: 3.10
     Programming Language :: Python :: 3 :: Only
     Programming Language :: Python :: Implementation :: CPython
 
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index c91b557..2826f56 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -34,11 +34,6 @@
 class LiveMigrationTestBase(base.BaseV2ComputeAdminTest):
     """Test live migration operations supported by admin user"""
 
-    # These tests don't attempt any SSH validation nor do they use
-    # floating IPs on the instance, so all we need is a network and
-    # a subnet so the instance being migrated has a single port, but
-    # we need that to make sure we are properly updating the port
-    # host bindings during the live migration.
     create_default_network = True
 
     @classmethod
@@ -104,6 +99,11 @@
     max_microversion = '2.24'
     block_migration = None
 
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(LiveMigrationTest, cls).setup_credentials()
+
     def _test_live_migration(self, state='ACTIVE', volume_backed=False):
         """Tests live migration between two hosts.
 
@@ -182,7 +182,12 @@
         attach volume. This differs from test_volume_backed_live_migration
         above that tests live-migration with only an attached volume.
         """
-        server = self.create_test_server(wait_until="ACTIVE")
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until="SSHABLE")
         server_id = server['id']
         if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
             # not to specify a host so that the scheduler will pick one
diff --git a/tempest/api/compute/admin/test_server_external_events.py b/tempest/api/compute/admin/test_server_external_events.py
new file mode 100644
index 0000000..1c5c295
--- /dev/null
+++ b/tempest/api/compute/admin/test_server_external_events.py
@@ -0,0 +1,37 @@
+# Copyright 2022 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+from tempest.lib import decorators
+
+
+class ServerExternalEventsTest(base.BaseV2ComputeAdminTest):
+    """Test server external events test"""
+
+    @decorators.idempotent_id('6bbf4723-61d2-4372-af55-7ba27f1c9ba6')
+    def test_create_server_external_events(self):
+        """Test create a server and add some external events"""
+        server_id = self.create_test_server(wait_until='ACTIVE')['id']
+        events = [
+            {
+                "name": "network-changed",
+                "server_uuid": server_id,
+            }
+        ]
+        client = self.os_admin.server_external_events_client
+        events_resp = client.create_server_external_events(
+            events=events)['events'][0]
+        self.assertEqual(server_id, events_resp['server_uuid'])
+        self.assertEqual('network-changed', events_resp['name'])
+        self.assertEqual(200, events_resp['code'])
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index f440428..9082306 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -16,6 +16,7 @@
 
 from tempest.api.compute import base
 from tempest.common import compute
+from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
 
@@ -125,3 +126,47 @@
         hostnames = list(hosts.values())
         self.assertEqual(hostnames[0], hostnames[1],
                          'Servers are on the different hosts: %s' % hosts)
+
+
+class UnshelveToHostMultiNodesTest(base.BaseV2ComputeAdminTest):
+    """Test to unshelve server in between hosts."""
+    min_microversion = '2.91'
+    max_microversion = 'latest'
+
+    @classmethod
+    def skip_checks(cls):
+        super(UnshelveToHostMultiNodesTest, cls).skip_checks()
+
+        if CONF.compute.min_compute_nodes < 2:
+            raise cls.skipException(
+                "Less than 2 compute nodes, skipping multi-nodes test.")
+
+    def _shelve_offload_then_unshelve_to_host(self, server, host):
+        compute.shelve_server(self.servers_client, server['id'],
+                              force_shelve_offload=True)
+
+        self.os_admin.servers_client.unshelve_server(
+            server['id'],
+            body={'unshelve': {'host': host}}
+            )
+        waiters.wait_for_server_status(self.servers_client, server['id'],
+                                       'ACTIVE')
+
+    @decorators.idempotent_id('b5cc0889-50c2-46a0-b8ff-b5fb4c3a6e20')
+    def test_unshelve_to_specific_host(self):
+        """Test unshelve to a specific host, new behavior introduced in
+        microversion 2.91.
+        1. Shelve offload server.
+        2. Request unshelve to original host and verify the server lands on it.
+        3. Shelve offload server again.
+        4. Request unshelve to the other host and verify the server lands on it.
+        """
+        server = self.create_test_server(wait_until='ACTIVE')
+        host = self.get_host_for_server(server['id'])
+        otherhost = self.get_host_other_than(server['id'])
+
+        self._shelve_offload_then_unshelve_to_host(server, host)
+        self.assertEqual(host, self.get_host_for_server(server['id']))
+
+        self._shelve_offload_then_unshelve_to_host(server, otherhost)
+        self.assertEqual(otherhost, self.get_host_for_server(server['id']))
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index c1236a7..7da87c7 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -26,6 +26,11 @@
     create_default_network = True
 
     @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(TestVolumeSwapBase, cls).setup_credentials()
+
+    @classmethod
     def skip_checks(cls):
         super(TestVolumeSwapBase, cls).skip_checks()
         if not CONF.compute_feature_enabled.swap_volume:
@@ -100,7 +105,16 @@
         volume1 = self.create_volume()
         volume2 = self.create_volume()
         # Boot server
-        server = self.create_test_server(wait_until='ACTIVE')
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guest to fully boot as the test
+        # will attach a volume to the server and therefore cleanup will try to
+        # detach it. See bug 1960346 for details.
+        server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE'
+        )
         # Attach "volume1" to server
         self.attach_volume(server, volume1)
         # Swap volume from "volume1" to "volume2"
@@ -200,9 +214,18 @@
         volume2 = self.create_volume(multiattach=True)
 
         # Create two servers and wait for them to be ACTIVE.
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guests to fully boot as the test
+        # will attach volumes to the servers and therefore cleanup will try to
+        # detach them. See bug 1960346 for details.
         reservation_id = self.create_test_server(
-            wait_until='ACTIVE', min_count=2,
-            return_reservation_id=True)['reservation_id']
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE',
+            min_count=2,
+            return_reservation_id=True,
+        )['reservation_id']
         # Get the servers using the reservation_id.
         servers = self.servers_client.list_servers(
             reservation_id=reservation_id)['servers']
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 10d522b..91ab09e 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -28,21 +28,22 @@
     create_default_network = True
 
     @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(VolumesAdminNegativeTest, cls).setup_credentials()
+
+    @classmethod
     def skip_checks(cls):
         super(VolumesAdminNegativeTest, cls).skip_checks()
         if not CONF.service_available.cinder:
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
 
-    @classmethod
-    def resource_setup(cls):
-        super(VolumesAdminNegativeTest, cls).resource_setup()
-        cls.server = cls.create_test_server(wait_until='ACTIVE')
-
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('309b5ecd-0585-4a7e-a36f-d2b2bf55259d')
     def test_update_attached_volume_with_nonexistent_volume_in_uri(self):
         """Test swapping non existent volume should fail"""
+        self.server = self.create_test_server(wait_until="ACTIVE")
         volume = self.create_volume()
         nonexistent_volume = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
@@ -55,6 +56,17 @@
     @decorators.idempotent_id('7dcac15a-b107-46d3-a5f6-cb863f4e454a')
     def test_update_attached_volume_with_nonexistent_volume_in_body(self):
         """Test swapping volume to a non existence volume should fail"""
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guest to fully boot as
+        # test_update_attached_volume_with_nonexistent_volume_in_body case
+        # will attach a volume to it and therefore cleanup will try to detach
+        # it. See bug 1960346 for details.
+        self.server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until="SSHABLE")
+
         volume = self.create_volume()
         self.attach_volume(self.server, volume)
 
@@ -76,6 +88,13 @@
     min_microversion = '2.60'
     volume_min_microversion = '3.27'
 
+    create_default_network = True
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.prepare_instance_network()
+        super(UpdateMultiattachVolumeNegativeTest, cls).setup_credentials()
+
     @classmethod
     def skip_checks(cls):
         super(UpdateMultiattachVolumeNegativeTest, cls).skip_checks()
@@ -101,8 +120,21 @@
         vol2 = self.create_volume(multiattach=True)
 
         # Create two instances.
-        server1 = self.create_test_server(wait_until='ACTIVE')
-        server2 = self.create_test_server(wait_until='ACTIVE')
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        # NOTE(gibi): We need to wait for the guests to fully boot as the test
+        # will attach volumes to the servers and therefore cleanup will try to
+        # detach them. See bug 1960346 for details.
+        server1 = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE'
+        )
+        server2 = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until='SSHABLE'
+        )
 
         # Attach vol1 to both of these instances.
         vol1_attachment1 = self.attach_volume(server1, vol1)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index e16afaf..ea1cddc 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -66,7 +66,9 @@
         # Setting network=True, subnet=True creates a default network
         cls.set_network_resources(
             network=cls.create_default_network,
-            subnet=cls.create_default_network)
+            subnet=cls.create_default_network,
+            router=cls.create_default_network,
+            dhcp=cls.create_default_network)
         super(BaseV2ComputeTest, cls).setup_credentials()
 
     @classmethod
@@ -412,7 +414,8 @@
         return image
 
     @classmethod
-    def recreate_server(cls, server_id, validatable=False, **kwargs):
+    def recreate_server(cls, server_id, validatable=False, wait_until='ACTIVE',
+                        **kwargs):
         """Destroy an existing class level server and creates a new one
 
         Some test classes use a test server that can be used by multiple
@@ -440,7 +443,7 @@
             validatable,
             validation_resources=cls.get_class_validation_resources(
                 cls.os_primary),
-            wait_until='ACTIVE',
+            wait_until=wait_until,
             adminPass=cls.password,
             **kwargs)
         return server['id']
@@ -455,15 +458,31 @@
         except Exception:
             LOG.exception('Failed to delete server %s', server_id)
 
-    def resize_server(self, server_id, new_flavor_id, **kwargs):
+    def resize_server(
+        self, server_id, new_flavor_id, wait_until='ACTIVE', **kwargs
+    ):
         """resize and confirm_resize an server, waits for it to be ACTIVE."""
         self.servers_client.resize_server(server_id, new_flavor_id, **kwargs)
         waiters.wait_for_server_status(self.servers_client, server_id,
                                        'VERIFY_RESIZE')
         self.servers_client.confirm_resize_server(server_id)
+
         waiters.wait_for_server_status(
             self.servers_client, server_id, 'ACTIVE')
         server = self.servers_client.show_server(server_id)['server']
+
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        if (
+            validation_resources and
+            wait_until in ("SSHABLE", "PINGABLE") and
+            CONF.validation.run_validation
+        ):
+            tenant_network = self.get_tenant_network()
+            compute.wait_for_ssh_or_ping(
+                server, self.os_primary, tenant_network,
+                True, validation_resources, wait_until, True)
+
         self.assert_flavor_equal(new_flavor_id, server['flavor'])
 
     def reboot_server(self, server_id, type):
@@ -679,6 +698,8 @@
             binary='nova-compute')['services']
         hosts = []
         for svc in svcs:
+            if svc['host'].endswith('-ironic'):
+                continue
             if svc['state'] == 'up' and svc['status'] == 'enabled':
                 if CONF.compute.compute_volume_common_az:
                     if svc['zone'] == CONF.compute.compute_volume_common_az:
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 91ce1f9..d47ffce 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -128,3 +128,27 @@
                                               wait_for_server=False)
         self.addCleanup(self.client.delete_image, image['id'])
         self.assertEqual(snapshot_name, image['name'])
+
+    @decorators.idempotent_id('f3cac456-e3fe-4183-a7a7-a59f7f017088')
+    def test_create_server_from_snapshot(self):
+        # Create one server normally
+        server = self.create_test_server(wait_until='ACTIVE')
+        self.addCleanup(self.servers_client.delete_server, server['id'])
+
+        # Snapshot it
+        snapshot_name = data_utils.rand_name('test-snap')
+        image = self.create_image_from_server(server['id'],
+                                              name=snapshot_name,
+                                              wait_until='ACTIVE',
+                                              wait_for_server=False)
+        self.addCleanup(self.client.delete_image, image['id'])
+
+        # Try to create another server from that snapshot
+        server2 = self.create_test_server(wait_until='ACTIVE',
+                                          image_id=image['id'])
+
+        # Delete server 2 before we finish otherwise we'll race with
+        # the cleanup which tries to delete the image before the
+        # server is gone.
+        self.servers_client.delete_server(server2['id'])
+        waiters.wait_for_server_termination(self.servers_client, server2['id'])
diff --git a/tempest/api/compute/images/test_images_negative.py b/tempest/api/compute/images/test_images_negative.py
index 5ff2a6a..124651e 100644
--- a/tempest/api/compute/images/test_images_negative.py
+++ b/tempest/api/compute/images/test_images_negative.py
@@ -43,6 +43,7 @@
 
 class ImagesNegativeTestJSON(ImagesNegativeTestBase):
     """Negative tests of server image"""
+    create_default_network = True
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6cd5a89d-5b47-46a7-93bc-3916f0d84973')
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 870c6f5..e1e7fda 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -43,7 +43,7 @@
         super(ServerActionsTestJSON, self).setUp()
         # Check if the server is in a clean state after test
         try:
-            validation_resources = self.get_class_validation_resources(
+            self.validation_resources = self.get_class_validation_resources(
                 self.os_primary)
             # _test_rebuild_server test compares ip address attached to the
             # server before and after the rebuild, in order to avoid
@@ -53,24 +53,24 @@
             waiters.wait_for_server_floating_ip(
                 self.client,
                 self.client.show_server(self.server_id)['server'],
-                validation_resources['floating_ip'])
+                self.validation_resources['floating_ip'])
             waiters.wait_for_server_status(self.client,
                                            self.server_id, 'ACTIVE')
         except lib_exc.NotFound:
             # The server was deleted by previous test, create a new one
             # Use class level validation resources to avoid them being
             # deleted once a test is over
-            validation_resources = self.get_class_validation_resources(
+            self.validation_resources = self.get_class_validation_resources(
                 self.os_primary)
             server = self.create_test_server(
                 validatable=True,
-                validation_resources=validation_resources,
-                wait_until='ACTIVE')
+                validation_resources=self.validation_resources,
+                wait_until='SSHABLE')
             self.__class__.server_id = server['id']
         except Exception:
             # Rebuild server if something happened to it during a test
             self.__class__.server_id = self.recreate_server(
-                self.server_id, validatable=True)
+                self.server_id, validatable=True, wait_until='SSHABLE')
 
     def tearDown(self):
         super(ServerActionsTestJSON, self).tearDown()
@@ -92,7 +92,8 @@
     @classmethod
     def resource_setup(cls):
         super(ServerActionsTestJSON, cls).resource_setup()
-        cls.server_id = cls.recreate_server(None, validatable=True)
+        cls.server_id = cls.recreate_server(None, validatable=True,
+                                            wait_until='SSHABLE')
 
     @decorators.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
     @testtools.skipUnless(CONF.compute_feature_enabled.change_password,
@@ -105,11 +106,9 @@
         """
         # Since this test messes with the password and makes the
         # server unreachable, it should create its own server
-        validation_resources = self.get_test_validation_resources(
-            self.os_primary)
         newserver = self.create_test_server(
             validatable=True,
-            validation_resources=validation_resources,
+            validation_resources=self.validation_resources,
             wait_until='ACTIVE')
         self.addCleanup(self.delete_server, newserver['id'])
         # The server's password should be set to the provided password
@@ -121,7 +120,7 @@
             # Verify that the user can authenticate with the new password
             server = self.client.show_server(newserver['id'])['server']
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server, validation_resources),
+                self.get_server_ip(server, self.validation_resources),
                 self.ssh_user,
                 new_password,
                 server=server,
@@ -130,15 +129,13 @@
 
     def _test_reboot_server(self, reboot_type):
         if CONF.validation.run_validation:
-            validation_resources = self.get_class_validation_resources(
-                self.os_primary)
             # Get the time the server was last rebooted,
             server = self.client.show_server(self.server_id)['server']
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server, validation_resources),
+                self.get_server_ip(server, self.validation_resources),
                 self.ssh_user,
                 self.password,
-                validation_resources['keypair']['private_key'],
+                self.validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             boot_time = linux_client.get_boot_time()
@@ -152,10 +149,10 @@
         if CONF.validation.run_validation:
             # Log in and verify the boot time has changed
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server, validation_resources),
+                self.get_server_ip(server, self.validation_resources),
                 self.ssh_user,
                 self.password,
-                validation_resources['keypair']['private_key'],
+                self.validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             new_boot_time = linux_client.get_boot_time()
@@ -184,10 +181,18 @@
         server = self.client.show_server(server['id'])['server']
         self.assertNotIn('security_groups', server)
 
-    def _rebuild_server_and_check(self, image_ref):
-        rebuilt_server = (self.client.rebuild_server(self.server_id, image_ref)
+    def _rebuild_server_and_check(self, image_ref, server):
+        rebuilt_server = (self.client.rebuild_server(server['id'], image_ref)
                           ['server'])
-        waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+        if CONF.validation.run_validation:
+            tenant_network = self.get_tenant_network()
+            compute.wait_for_ssh_or_ping(
+                server, self.os_primary, tenant_network,
+                True, self.validation_resources, "SSHABLE", True)
+        else:
+            waiters.wait_for_server_status(self.client, server['id'],
+                                           'ACTIVE')
+
         msg = ('Server was not rebuilt to the original image. '
                'The original image: {0}. The current image: {1}'
                .format(image_ref, rebuilt_server['image']['id']))
@@ -211,7 +216,8 @@
         # If the server was rebuilt on a different image, restore it to the
         # original image once the test ends
         if self.image_ref_alt != self.image_ref:
-            self.addCleanup(self._rebuild_server_and_check, self.image_ref)
+            self.addCleanup(self._rebuild_server_and_check, self.image_ref,
+                            rebuilt_server)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -229,8 +235,6 @@
         self.assertEqual(original_addresses, server['addresses'])
 
         if CONF.validation.run_validation:
-            validation_resources = self.get_class_validation_resources(
-                self.os_primary)
             # Authentication is attempted in the following order of priority:
             # 1.The key passed in, if one was passed in.
             # 2.Any key we can find through an SSH agent (if allowed).
@@ -238,10 +242,10 @@
             #   ~/.ssh/ (if allowed).
             # 4.Plain username/password auth, if a password was given.
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(rebuilt_server, validation_resources),
+                self.get_server_ip(rebuilt_server, self.validation_resources),
                 self.ssh_alt_user,
                 password,
-                validation_resources['keypair']['private_key'],
+                self.validation_resources['keypair']['private_key'],
                 server=rebuilt_server,
                 servers_client=self.client)
             linux_client.validate_authentication()
@@ -272,7 +276,7 @@
         # If the server was rebuilt on a different image, restore it to the
         # original image once the test ends
         if self.image_ref_alt != self.image_ref:
-            self.addCleanup(self._rebuild_server_and_check, old_image)
+            self.addCleanup(self._rebuild_server_and_check, old_image, server)
 
         # Verify the properties in the initial response are correct
         self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -317,13 +321,11 @@
         self.assertEqual(self.server_id,
                          vol_after_rebuild['attachments'][0]['server_id'])
         if CONF.validation.run_validation:
-            validation_resources = self.get_class_validation_resources(
-                self.os_primary)
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server, validation_resources),
+                self.get_server_ip(server, self.validation_resources),
                 self.ssh_alt_user,
                 password=None,
-                pkey=validation_resources['keypair']['private_key'],
+                pkey=self.validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             linux_client.validate_authentication()
@@ -375,10 +377,8 @@
         kwargs = {'volume_backed': True,
                   'wait_until': 'ACTIVE'}
         if CONF.validation.run_validation:
-            validation_resources = self.get_test_validation_resources(
-                self.os_primary)
             kwargs.update({'validatable': True,
-                           'validation_resources': validation_resources})
+                           'validation_resources': self.validation_resources})
         server = self.create_test_server(**kwargs)
 
         # NOTE(mgoddard): Get detailed server to ensure addresses are present
@@ -394,10 +394,10 @@
             self.client.get_console_output(server['id'])
         if CONF.validation.run_validation:
             linux_client = remote_client.RemoteClient(
-                self.get_server_ip(server, validation_resources),
+                self.get_server_ip(server, self.validation_resources),
                 self.ssh_user,
                 password=None,
-                pkey=validation_resources['keypair']['private_key'],
+                pkey=self.validation_resources['keypair']['private_key'],
                 server=server,
                 servers_client=self.client)
             linux_client.validate_authentication()
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index e4ec209..5380c67 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -378,10 +378,19 @@
                   the created volume, and dict of server ID to volumeAttachment
                   dict entries
         """
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+
         servers = []
         for x in range(2):
             name = 'multiattach-server-%i' % x
-            servers.append(self.create_test_server(name=name))
+            servers.append(
+                self.create_test_server(
+                    name=name,
+                    validatable=True,
+                    validation_resources=validation_resources
+                )
+            )
 
         # Now wait for the servers to be ACTIVE.
         for server in servers:
@@ -492,7 +501,10 @@
         servers, volume, _ = self._create_and_multiattach()
 
         for server in servers:
-            self.resize_server(server['id'], self.flavor_ref_alt)
+            # We need to wait until the guest OS fully boots up as we are going
+            # to detach volumes after the resize. See bug #1960346.
+            self.resize_server(
+                server['id'], self.flavor_ref_alt, wait_until='SSHABLE')
 
         for server in servers:
             self._detach_multiattach_volume(volume['id'], server['id'])
diff --git a/tempest/api/image/v2/admin/test_image_caching.py b/tempest/api/image/v2/admin/test_image_caching.py
new file mode 100644
index 0000000..11dcc80
--- /dev/null
+++ b/tempest/api/image/v2/admin/test_image_caching.py
@@ -0,0 +1,153 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import io
+
+from oslo_log import log as logging
+from tempest.api.image import base
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class ImageCachingTest(base.BaseV2ImageTest):
+    """Tests for the glance image cache admin operations"""
+    credentials = ['primary', 'admin']
+
+    def setUp(self):
+        super(ImageCachingTest, self).setUp()
+        # NOTE(abhishekk): Since caching is enabled, an instance boot, volume
+        # boot or image download can also cache an image, so we maintain our
+        # own caching information to avoid disturbing other tests
+        self.cached_info = {}
+
+    def tearDown(self):
+        # Delete all from cache/queue if we exit abruptly
+        for image_id in self.cached_info:
+            self.os_admin.image_cache_client.cache_delete(
+                image_id)
+        super(ImageCachingTest, self).tearDown()
+
+    @classmethod
+    def skip_checks(cls):
+        super(ImageCachingTest, cls).skip_checks()
+        # Check to see if we should even be running these tests.
+        if not CONF.image.image_caching_enabled:
+            raise cls.skipException('Target system is not configured with '
+                                    'glance caching')
+
+    def image_create_and_upload(self, upload=True, **kwargs):
+        """Wrapper that returns a test image."""
+        if 'name' not in kwargs:
+            name = data_utils.rand_name(self.__class__.__name__ + "-image")
+            kwargs['name'] = name
+
+        params = dict(kwargs)
+        image = self.create_image(**params)
+        self.assertEqual('queued', image['status'])
+        if not upload:
+            return image
+
+        file_content = data_utils.random_bytes()
+        image_file = io.BytesIO(file_content)
+        self.client.store_image_file(image['id'], image_file)
+
+        image = self.client.show_image(image['id'])
+        return image
+
+    def _assertCheckQueues(self, queued_images):
+        for image in self.cached_info:
+            if self.cached_info[image] == 'queued':
+                self.assertIn(image, queued_images)
+
+    def _assertCheckCache(self, cached_images):
+        cached_list = []
+        for image in cached_images:
+            cached_list.append(image['image_id'])
+
+        for image in self.cached_info:
+            if self.cached_info[image] == 'cached':
+                self.assertIn(image, cached_list)
+
+    @decorators.idempotent_id('4bf6adba-2f9f-47e9-a6d5-37f21ad4387c')
+    def test_image_caching_cycle(self):
+        """Test image cache APIs"""
+        # Ensure that non-admin user is not allowed to perform caching
+        # operations
+        self.assertRaises(lib_exc.Forbidden,
+                          self.os_primary.image_cache_client.list_cache)
+
+        # Check that nothing is queued or cached by us
+        output = self.os_admin.image_cache_client.list_cache()
+        self._assertCheckQueues(output['queued_images'])
+        self._assertCheckCache(output['cached_images'])
+
+        # Non-existing image should raise NotFound exception
+        self.assertRaises(lib_exc.NotFound,
+                          self.os_admin.image_cache_client.cache_queue,
+                          'non-existing-image-id')
+
+        # Verify that we can not use queued image for queueing
+        image = self.image_create_and_upload(name='queued', upload=False)
+        self.assertRaises(lib_exc.BadRequest,
+                          self.os_admin.image_cache_client.cache_queue,
+                          image['id'])
+
+        # Create one image
+        image = self.image_create_and_upload(name='first',
+                                             container_format='bare',
+                                             disk_format='raw',
+                                             visibility='private')
+        self.assertEqual('active', image['status'])
+
+        # Queue image for caching
+        self.os_admin.image_cache_client.cache_queue(image['id'])
+        self.cached_info[image['id']] = 'queued'
+        # Verify that we have 1 image for queueing and 0 for caching
+        output = self.os_admin.image_cache_client.list_cache()
+        self._assertCheckQueues(output['queued_images'])
+        self._assertCheckCache(output['cached_images'])
+
+        # Wait for image caching
+        LOG.info("Waiting for image %s to get cached", image['id'])
+        caching = waiters.wait_for_caching(
+            self.client,
+            self.os_admin.image_cache_client,
+            image['id'])
+
+        self.cached_info[image['id']] = 'cached'
+        # verify that we have image in cache and not in queued
+        self._assertCheckQueues(caching['queued_images'])
+        self._assertCheckCache(caching['cached_images'])
+
+        # Verify that we can delete images from caching and queueing with
+        # api call.
+        self.os_admin.image_cache_client.cache_clear()
+        output = self.os_admin.image_cache_client.list_cache()
+        self.assertEqual(0, len(output['queued_images']))
+        self.assertEqual(0, len(output['cached_images']))
+
+        # Verify that invalid header value for target returns 400 response
+        self.assertRaises(lib_exc.BadRequest,
+                          self.os_admin.image_cache_client.cache_clear,
+                          target="invalid")
+        # Remove all data from local information
+        self.cached_info = {}
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index d283ab3..d590668 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -49,12 +49,12 @@
             raise cls.skipException('Server does not support '
                                     'any import method')
 
-    def _create_image(self):
+    def _create_image(self, disk_format=None, container_format=None):
         # Create image
         uuid = '00000000-1111-2222-3333-444455556666'
         image_name = data_utils.rand_name('image')
-        container_format = CONF.image.container_formats[0]
-        disk_format = CONF.image.disk_formats[0]
+        container_format = container_format or CONF.image.container_formats[0]
+        disk_format = disk_format or CONF.image.disk_formats[0]
         image = self.create_image(name=image_name,
                                   container_format=container_format,
                                   disk_format=disk_format,
@@ -131,9 +131,144 @@
         # import image from web to backend
         image_uri = CONF.image.http_image
         self.client.image_import(image['id'], method='web-download',
-                                 image_uri=image_uri)
+                                 import_params={'uri': image_uri})
         waiters.wait_for_image_imported_to_stores(self.client, image['id'])
 
+    @decorators.idempotent_id('8876c818-c40e-4b90-9742-31d231616305')
+    def test_image_glance_download_import_success(self):
+        # We use glance-direct initially, then glance-download for test
+        self._require_import_method('glance-direct')
+        self._require_import_method('glance-download')
+
+        # Create an image via the normal import process to be our source
+        src = self._stage_and_check()
+        self.client.image_import(src, method='glance-direct')
+        waiters.wait_for_image_imported_to_stores(self.client, src)
+
+        # Add some properties to it that will be copied by the default
+        # config (and one that won't)
+        self.client.update_image(src, [
+            {'add': '/hw_cpu_cores', 'value': '5'},
+            {'add': '/trait:STORAGE_DISK_SSD', 'value': 'required'},
+            {'add': '/os_distro', 'value': 'rhel'},
+            {'add': '/speed', 'value': '88mph'},
+        ])
+
+        # Make sure our properties stuck on the source image
+        src_image = self.client.show_image(src)
+        self.assertEqual('5', src_image['hw_cpu_cores'])
+        self.assertEqual('required', src_image['trait:STORAGE_DISK_SSD'])
+        self.assertEqual('rhel', src_image['os_distro'])
+        self.assertEqual('88mph', src_image['speed'])
+
+        # Create a new image which we will fill from another glance image
+        dst = self._create_image(container_format='ovf',
+                                 disk_format='iso')['id']
+
+        # Set some values that will conflict to make sure we get the
+        # new ones and confirm they stuck before the import.
+        self.client.update_image(dst, [
+            {'add': '/hw_cpu_cores', 'value': '1'},
+            {'add': '/os_distro', 'value': 'windows'},
+        ])
+        dst_image = self.client.show_image(dst)
+        self.assertEqual('1', dst_image['hw_cpu_cores'])
+        self.assertEqual('windows', dst_image['os_distro'])
+
+        params = {
+            'glance_image_id': src,
+            'glance_region': self.client.region,
+            'glance_service_interface': 'public',
+        }
+        self.client.image_import(dst, method='glance-download',
+                                 import_params=params)
+        waiters.wait_for_image_tasks_status(self.client, dst, 'success')
+
+        # Make sure the new image has all the keys imported from the
+        # original image that we expect
+        dst_image = self.client.show_image(dst)
+        self.assertEqual(src_image['disk_format'], dst_image['disk_format'])
+        self.assertEqual(src_image['container_format'],
+                         dst_image['container_format'])
+        self.assertEqual('5', dst_image['hw_cpu_cores'])
+        self.assertEqual('required', dst_image['trait:STORAGE_DISK_SSD'])
+        self.assertEqual('rhel', dst_image['os_distro'])
+        self.assertNotIn('speed', dst_image)
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('36d4b546-64a2-4bb9-bdd0-ba676aa48f2c')
+    def test_image_glance_download_import_bad_uuid(self):
+        self._require_import_method('glance-download')
+        image_id = self._create_image()['id']
+        params = {
+            'glance_image_id': 'foo',
+            'glance_region': self.client.region,
+            'glance_service_interface': 'public',
+        }
+
+        # A non-UUID-like image id should make us fail immediately
+        e = self.assertRaises(lib_exc.BadRequest,
+                              self.client.image_import,
+                              image_id, method='glance-download',
+                              import_params=params)
+        self.assertIn('image id does not look like a UUID', str(e))
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('77644240-dbbe-4744-ae28-09b2ac12e218')
+    def test_image_glance_download_import_bad_endpoint(self):
+        self._require_import_method('glance-download')
+        image_id = self._create_image()['id']
+
+        # Set some properties before the import to make sure they are
+        # undisturbed
+        self.client.update_image(image_id, [
+            {'add': '/hw_cpu_cores', 'value': '1'},
+            {'add': '/os_distro', 'value': 'windows'},
+        ])
+        image = self.client.show_image(image_id)
+        self.assertEqual('1', image['hw_cpu_cores'])
+        self.assertEqual('windows', image['os_distro'])
+
+        params = {
+            'glance_image_id': '36d4b546-64a2-4bb9-bdd0-ba676aa48f2c',
+            'glance_region': 'not a region',
+            'glance_service_interface': 'not an interface',
+        }
+
+        # A bad region or interface will cause us to fail when we
+        # contact the remote glance.
+        self.client.image_import(image_id, method='glance-download',
+                                 import_params=params)
+        waiters.wait_for_image_tasks_status(self.client, image_id, 'failure')
+
+        # Make sure we reverted the image status to queued on failure, and that
+        # our extra properties are still in place.
+        image = self.client.show_image(image_id)
+        self.assertEqual('queued', image['status'])
+        self.assertEqual('1', image['hw_cpu_cores'])
+        self.assertEqual('windows', image['os_distro'])
+
+    @decorators.attr(type=['negative'])
+    @decorators.idempotent_id('c7edec8e-24b5-416a-9d42-b3e773bab62c')
+    def test_image_glance_download_import_bad_missing_image(self):
+        self._require_import_method('glance-download')
+        image_id = self._create_image()['id']
+        params = {
+            'glance_image_id': '36d4b546-64a2-4bb9-bdd0-ba676aa48f2c',
+            'glance_region': self.client.region,
+            'glance_service_interface': 'public',
+        }
+
+        # A non-existent image will cause us to fail when we
+        # contact the remote glance.
+        self.client.image_import(image_id, method='glance-download',
+                                 import_params=params)
+        waiters.wait_for_image_tasks_status(self.client, image_id, 'failure')
+
+        # Make sure we reverted the image status to queued on failure
+        image = self.client.show_image(image_id)
+        self.assertEqual('queued', image['status'])
+
     @decorators.idempotent_id('e04761a1-22af-42c2-b8bc-a34a3f12b585')
     def test_remote_import(self):
         """Test image import against a different worker than stage.
@@ -228,10 +363,12 @@
 
         if all_stores:
             stores_list = ','.join([store['id']
-                                    for store in self.available_stores])
+                                    for store in self.available_stores
+                                    if store.get('read-only') != 'true'])
         else:
-            stores = [store['id'] for store in self.available_stores]
-            stores_list = stores[::len(stores) - 1]
+            stores = [store['id'] for store in self.available_stores
+                      if store.get('read-only') != 'true']
+            stores_list = stores[::max(1, len(stores) - 1)]
 
         return body, stores_list
 
@@ -630,3 +767,280 @@
         fetched_images = self.alt_img_client.list_images(params)['images']
         self.assertEqual(1, len(fetched_images))
         self.assertEqual(image['id'], fetched_images[0]['id'])
+
+
+class ImageLocationsTest(base.BaseV2ImageTest):
+    @classmethod
+    def skip_checks(cls):
+        super(ImageLocationsTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.manage_locations:
+            skip_msg = (
+                "%s skipped as show_multiple_locations is not available" % (
+                    cls.__name__))
+            raise cls.skipException(skip_msg)
+
+    @decorators.idempotent_id('58b0fadc-219d-40e1-b159-1c902cec323a')
+    def test_location_after_upload(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Locations should be empty when there is no data
+        self.assertEqual('queued', image['status'])
+        self.assertEqual([], image['locations'])
+
+        # Now try uploading an image file
+        file_content = data_utils.random_bytes()
+        image_file = io.BytesIO(file_content)
+        self.client.store_image_file(image['id'], image_file)
+        waiters.wait_for_image_status(self.client, image['id'], 'active')
+
+        # Locations should now have one item
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']),
+                         'Expected one location in %r' % image['locations'])
+
+        # NOTE(danms): If show_image_direct_url is enabled, then this
+        # will be present. If so, it should match the one location we set
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        return image
+
+    def _check_set_location(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Locations should be empty when there is no data
+        self.assertEqual('queued', image['status'])
+        self.assertEqual([], image['locations'])
+
+        # Add a new location
+        new_loc = {'metadata': {'foo': 'bar'},
+                   'url': CONF.image.http_image}
+        self.client.update_image(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # The image should now be active, with one location that looks
+        # like we expect
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']),
+                         'Image should have one location but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(new_loc['url'], image['locations'][0]['url'])
+        self.assertEqual('bar', image['locations'][0]['metadata'].get('foo'))
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        # If we added the location directly, the image goes straight
+        # to active and no hashing is done
+        self.assertEqual('active', image['status'])
+        self.assertIsNone(image['os_hash_algo'])
+        self.assertIsNone(image['os_hash_value'])
+
+        return image
+
+    @decorators.idempotent_id('37599b8a-d5c0-4590-aee5-73878502be15')
+    def test_set_location(self):
+        self._check_set_location()
+
+    def _check_set_multiple_locations(self):
+        image = self._check_set_location()
+
+        new_loc = {'metadata': {'speed': '88mph'},
+                   'url': '%s#new' % CONF.image.http_image}
+        self.client.update_image(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # The image should now have two locations and the last one
+        # (locations are ordered) should have the new URL.
+        image = self.client.show_image(image['id'])
+        self.assertEqual(2, len(image['locations']),
+                         'Image should have two locations but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(new_loc['url'], image['locations'][1]['url'])
+
+        # The image should still be active and still have no hashes
+        self.assertEqual('active', image['status'])
+        self.assertIsNone(image['os_hash_algo'])
+        self.assertIsNone(image['os_hash_value'])
+
+        # The direct_url should still match the first location
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        return image
+
+    @decorators.idempotent_id('bf6e0009-c039-4884-b498-db074caadb10')
+    def test_replace_location(self):
+        image = self._check_set_multiple_locations()
+        original_locs = image['locations']
+
+        # Replacing with the exact thing should work
+        self.client.update_image(image['id'], [
+            dict(replace='/locations', value=image['locations'])])
+
+        # Changing metadata on a location should work
+        original_locs[0]['metadata']['date'] = '2015-10-15'
+        self.client.update_image(image['id'], [
+            dict(replace='/locations', value=original_locs)])
+
+        # Deleting a location should not work
+        self.assertRaises(
+            lib_exc.BadRequest,
+            self.client.update_image,
+            image['id'], [
+                dict(replace='/locations', value=[original_locs[0]])])
+
+        # Replacing a location (with a different URL) should not work
+        new_loc = {'metadata': original_locs[1]['metadata'],
+                   'url': '%s#new3' % CONF.image.http_image}
+        self.assertRaises(
+            lib_exc.BadRequest,
+            self.client.update_image,
+            image['id'], [
+                dict(replace='/locations', value=[original_locs[0],
+                                                  new_loc])])
+
+        # Make sure the locations haven't changed with the above failures,
+        # but the metadata we updated should be changed.
+        image = self.client.show_image(image['id'])
+        self.assertEqual(2, len(image['locations']),
+                         'Image should have two locations but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(original_locs, image['locations'])
+
+    @decorators.idempotent_id('8a648de4-b745-4c28-a7b5-20de1c3da4d2')
+    def test_delete_locations(self):
+        image = self._check_set_multiple_locations()
+        expected_remaining_loc = image['locations'][1]
+
+        self.client.update_image(image['id'], [
+            dict(remove='/locations/0')])
+
+        # The image should now have only the one location we did not delete
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']),
+                         'Image should have one location but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(expected_remaining_loc['url'],
+                         image['locations'][0]['url'])
+
+        # The direct_url should now be the last remaining location
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        # Removing the last location should be disallowed
+        self.assertRaises(lib_exc.Forbidden,
+                          self.client.update_image, image['id'], [
+                              dict(remove='/locations/0')])
+
+    @decorators.idempotent_id('a9a20396-8399-4b36-909d-564949be098f')
+    def test_set_location_bad_scheme(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Locations should be empty when there is no data
+        self.assertEqual('queued', image['status'])
+        self.assertEqual([], image['locations'])
+
+        # Adding a new location using a scheme that is not allowed
+        # should result in an error
+        new_loc = {'metadata': {'foo': 'bar'},
+                   'url': 'gopher://info.cern.ch'}
+        self.assertRaises(lib_exc.BadRequest,
+                          self.client.update_image, image['id'], [
+                              dict(add='/locations/-', value=new_loc)])
+
+    def _check_set_location_with_hash(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Create a new location with validation data
+        new_loc = {'validation_data': {'checksum': '1' * 32,
+                                       'os_hash_value': 'deadbeef' * 16,
+                                       'os_hash_algo': 'sha512'},
+                   'metadata': {},
+                   'url': CONF.image.http_image}
+        self.client.update_image(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # Expect that all of our values ended up on the image
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']))
+        self.assertEqual('1' * 32, image['checksum'])
+        self.assertEqual('deadbeef' * 16, image['os_hash_value'])
+        self.assertEqual('sha512', image['os_hash_algo'])
+        self.assertNotIn('validation_data', image['locations'][0])
+        self.assertEqual('active', image['status'])
+
+        return image
+
+    @decorators.idempotent_id('42d6f7db-c9f5-4bae-9e15-a90262fe445a')
+    def test_set_location_with_hash(self):
+        self._check_set_location_with_hash()
+
+    @decorators.idempotent_id('304c8a19-aa86-47dd-a022-ec4c7f433f1b')
+    def test_set_location_with_hash_second_matching(self):
+        orig_image = self._check_set_location_with_hash()
+
+        new_loc = {
+            'validation_data': {'checksum': orig_image['checksum'],
+                                'os_hash_value': orig_image['os_hash_value'],
+                                'os_hash_algo': orig_image['os_hash_algo']},
+            'metadata': {},
+            'url': '%s#new' % CONF.image.http_image}
+        self.client.update_image(orig_image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # Setting the same exact values on a new location should work
+        image = self.client.show_image(orig_image['id'])
+        self.assertEqual(2, len(image['locations']))
+        self.assertEqual(orig_image['checksum'], image['checksum'])
+        self.assertEqual(orig_image['os_hash_value'], image['os_hash_value'])
+        self.assertEqual(orig_image['os_hash_algo'], image['os_hash_algo'])
+        self.assertNotIn('validation_data', image['locations'][0])
+        self.assertNotIn('validation_data', image['locations'][1])
+
+    @decorators.idempotent_id('f3ce99c2-9ffb-4b9f-b2cb-876929382553')
+    def test_set_location_with_hash_not_matching(self):
+        orig_image = self._check_set_location_with_hash()
+        values = {
+            'checksum': '2' * 32,
+            'os_hash_value': 'beefdead' * 16,
+            'os_hash_algo': 'sha256',
+        }
+
+        # Try to set a new location with one each of the above
+        # substitutions
+        for k, v in values.items():
+            new_loc = {
+                'validation_data': {
+                    'checksum': orig_image['checksum'],
+                    'os_hash_value': orig_image['os_hash_value'],
+                    'os_hash_algo': orig_image['os_hash_algo']},
+                'metadata': {},
+                'url': '%s#new' % CONF.image.http_image}
+            new_loc['validation_data'][k] = v
+
+            # This should always fail due to the mismatch
+            self.assertRaises(lib_exc.Conflict,
+                              self.client.update_image,
+                              orig_image['id'], [
+                                  dict(add='/locations/-', value=new_loc)])
+
+        # Now try to add a new location with all of the substitutions,
+        # which should also fail
+        new_loc['validation_data'] = values
+        self.assertRaises(lib_exc.Conflict,
+                          self.client.update_image,
+                          orig_image['id'], [
+                              dict(add='/locations/-', value=new_loc)])
+
+        # Make sure nothing has changed on our image after all the
+        # above failures
+        image = self.client.show_image(orig_image['id'])
+        self.assertEqual(1, len(image['locations']))
+        self.assertEqual(orig_image['checksum'], image['checksum'])
+        self.assertEqual(orig_image['os_hash_value'], image['os_hash_value'])
+        self.assertEqual(orig_image['os_hash_algo'], image['os_hash_algo'])
+        self.assertNotIn('validation_data', image['locations'][0])
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index a3802a9..80c01a5 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -206,7 +206,7 @@
         # import image from web to backend
         image_uri = 'http://does-not.exist/no/possible/way'
         self.client.image_import(image['id'], method='web-download',
-                                 image_uri=image_uri,
+                                 import_params={'uri': image_uri},
                                  stores=[stores[0]['id']])
 
         start_time = int(time.time())
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 8d8039b..7107dc4 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -16,6 +16,7 @@
 import time
 
 from tempest.common import custom_matchers
+from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import exceptions as lib_exc
@@ -124,6 +125,9 @@
                                                 object_name,
                                                 data,
                                                 metadata=metadata)
+                waiters.wait_for_object_create(cls.object_client,
+                                               container_name,
+                                               object_name)
                 return object_name, data
             # after bucket creation we might see Conflict
             except lib_exc.Conflict as e:
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index 7977a7a..fb67fb4 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -15,6 +15,7 @@
 
 from tempest.api.object_storage import base
 from tempest.common import utils
+from tempest.common import waiters
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
@@ -91,6 +92,9 @@
         for _ in range(QUOTA_COUNT):
             name = data_utils.rand_name(name="TestObject")
             self.object_client.create_object(self.container_name, name, "")
+            waiters.wait_for_object_create(self.object_client,
+                                           self.container_name,
+                                           name)
 
         nbefore = self._get_object_count()
         self.assertEqual(nbefore, QUOTA_COUNT)
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 6b1f849..b31ff76 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -126,7 +126,7 @@
                 self.assertEqual(object_content, obj_name[::-1].encode())
 
     @decorators.attr(type='slow')
-    @decorators.unstable_test(bug='1317133')
+    @decorators.skip_because(bug='1317133')
     @decorators.idempotent_id('be008325-1bba-4925-b7dd-93b58f22ce9b')
     @testtools.skipIf(
         not CONF.object_storage_feature_enabled.container_sync,
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 2823185..7d5bd26 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -182,17 +182,14 @@
         self.assertEqual(data, body)
 
     @decorators.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
-    @decorators.unstable_test(bug='1905432')
     def test_create_object_with_transfer_encoding(self):
         """Test creating object with transfer_encoding"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(1024)
-        headers = {'Transfer-Encoding': 'chunked'}
         resp, _ = self.object_client.create_object(
             self.container_name,
             object_name,
             data=data_utils.chunkify(data, 512),
-            headers=headers,
             chunked=True)
 
         self.assertHeaders(resp, 'Object', 'PUT')
diff --git a/tempest/api/object_storage/test_object_temp_url.py b/tempest/api/object_storage/test_object_temp_url.py
index e75e22a..8f218e2 100644
--- a/tempest/api/object_storage/test_object_temp_url.py
+++ b/tempest/api/object_storage/test_object_temp_url.py
@@ -19,9 +19,12 @@
 
 from tempest.api.object_storage import base
 from tempest.common import utils
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class ObjectTempUrlTest(base.BaseObjectTest):
     """Test object temp url"""
@@ -77,8 +80,11 @@
             container, object_name)
 
         hmac_body = '%s\n%s\n%s' % (method, expires, path)
+        hlib = getattr(
+            hashlib,
+            CONF.object_storage_feature_enabled.tempurl_digest_hashlib)
         sig = hmac.new(
-            key.encode(), hmac_body.encode(), hashlib.sha1
+            key.encode(), hmac_body.encode(), hlib
         ).hexdigest()
 
         url = "%s/%s?temp_url_sig=%s&temp_url_expires=%s" % (container,
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index 4ad8428..712697e 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -19,10 +19,13 @@
 
 from tempest.api.object_storage import base
 from tempest.common import utils
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
+CONF = config.CONF
+
 
 class ObjectTempUrlNegativeTest(base.BaseObjectTest):
     """Negative tests of object temp url"""
@@ -82,8 +85,11 @@
             container, object_name)
 
         hmac_body = '%s\n%s\n%s' % (method, expires, path)
+        hlib = getattr(
+            hashlib,
+            CONF.object_storage_feature_enabled.tempurl_digest_hashlib)
         sig = hmac.new(
-            key.encode(), hmac_body.encode(), hashlib.sha1
+            key.encode(), hmac_body.encode(), hlib
         ).hexdigest()
 
         url = "%s/%s?temp_url_sig=%s&temp_url_expires=%s" % (container,
diff --git a/tempest/api/volume/admin/test_backends_capabilities.py b/tempest/api/volume/admin/test_backends_capabilities.py
index 3c76eca..e3a8156 100644
--- a/tempest/api/volume/admin/test_backends_capabilities.py
+++ b/tempest/api/volume/admin/test_backends_capabilities.py
@@ -37,6 +37,33 @@
         # Check response schema
         self.admin_capabilities_client.show_backend_capabilities(self.hosts[0])
 
+    @staticmethod
+    def _change_capabilities_storage_protocol(capabilities):
+        """Convert storage_protocol to its canonical version"""
+        # List of storage protocols variants defined in cinder.common.constants
+        # The canonical name for storage protocol comes first in the list
+        VARIANTS = [['iSCSI', 'iscsi'], ['FC', 'fibre_channel', 'fc'],
+                    ['NFS', 'nfs'], ['NVMe-oF', 'NVMeOF', 'nvmeof']]
+
+        capabilities = sorted(list(capabilities))
+
+        # Cinder Bug #1966103: Some drivers were reporting different strings
+        # to represent the same storage protocol. For backward compatibility,
+        # the scheduler can handle the variants, but to standardize this for
+        # operators (who may need to refer to the protocol in volume-type
+        # extra-specs), the get-pools and get-capabilities response was changed
+        # to only report the canonical name for a storage protocol, but these
+        # 2 REST API calls were not changed simultaneously, so we may or may
+        # not get canonical names, so just convert to canonical names.
+        for item in range(len(capabilities)):
+            for variants in VARIANTS:
+                if capabilities[item][2] in variants:
+                    capabilities[item] = (capabilities[item][0],
+                                          capabilities[item][1],
+                                          variants[0])
+
+        return capabilities
+
     @decorators.idempotent_id('a9035743-d46a-47c5-9cb7-3c80ea16dea0')
     def test_compare_volume_stats_values(self):
         """Test comparing volume stats values
@@ -60,8 +87,9 @@
         ]
 
         # Returns a tuple of VOLUME_STATS values
-        expected_list = sorted(list(map(operator.itemgetter(*VOLUME_STATS),
-                                        cinder_pools)))
-        observed_list = sorted(list(map(operator.itemgetter(*VOLUME_STATS),
-                                        capabilities)))
+        expected_list = self._change_capabilities_storage_protocol(
+            map(operator.itemgetter(*VOLUME_STATS), cinder_pools))
+        observed_list = self._change_capabilities_storage_protocol(
+            map(operator.itemgetter(*VOLUME_STATS), capabilities))
+
         self.assertEqual(expected_list, observed_list)
diff --git a/tempest/api/volume/admin/test_encrypted_volumes_extend.py b/tempest/api/volume/admin/test_encrypted_volumes_extend.py
index 7339179..e85a00d 100644
--- a/tempest/api/volume/admin/test_encrypted_volumes_extend.py
+++ b/tempest/api/volume/admin/test_encrypted_volumes_extend.py
@@ -31,5 +31,18 @@
         "Attached encrypted volume extend is disabled.")
     @utils.services('compute')
     def test_extend_attached_encrypted_volume_luksv1(self):
+        """LUKS v1 decrypts and extends through libvirt."""
         volume = self.create_encrypted_volume(encryption_provider="luks")
         self._test_extend_attached_volume(volume)
+
+    @decorators.idempotent_id('381a2a3a-b2f4-4631-a910-720881f2cc2f')
+    @testtools.skipUnless(
+        CONF.volume_feature_enabled.extend_attached_encrypted_volume,
+        "Attached encrypted volume extend is disabled.")
+    @testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
+                      'Ceph only supports LUKSv2 if doing host attach.')
+    @utils.services('compute')
+    def test_extend_attached_encrypted_volume_luksv2(self):
+        """LUKS v2 decrypts and extends through os-brick."""
+        volume = self.create_encrypted_volume(encryption_provider="luks2")
+        self._test_extend_attached_volume(volume)
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index f1dec06..62cb203 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -110,9 +110,7 @@
     """Test volume transfer for the "new" Transfers API mv 3.55"""
 
     volume_min_microversion = '3.55'
-    volume_max_microversion = 'latest'
-
-    credentials = ['primary', 'alt', 'admin']
+    volume_max_microversion = '3.56'
 
     @classmethod
     def setup_clients(cls):
@@ -131,3 +129,22 @@
         """Test create, list, delete with volume-transfers API mv 3.55"""
         super(VolumesTransfersV355Test, self). \
             test_create_list_delete_volume_transfer()
+
+
+class VolumesTransfersV357Test(VolumesTransfersV355Test):
+    """Test volume transfer for the "new" Transfers API mv 3.57"""
+
+    volume_min_microversion = '3.57'
+    volume_max_microversion = 'latest'
+
+    @decorators.idempotent_id('d746bd69-bb30-4414-9a1c-577959fac6a1')
+    def test_create_get_list_accept_volume_transfer(self):
+        """Test create, get, list, accept with volume-transfers API mv 3.57"""
+        super(VolumesTransfersV357Test, self). \
+            test_create_get_list_accept_volume_transfer()
+
+    @decorators.idempotent_id('d4b20ec2-e1bb-4068-adcf-6c20020a8e05')
+    def test_create_list_delete_volume_transfer(self):
+        """Test create, list, delete with volume-transfers API mv 3.57"""
+        super(VolumesTransfersV357Test, self). \
+            test_create_list_delete_volume_transfer()
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index a58da7e..b3a04f8 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -45,7 +45,7 @@
     def test_snapshot_create_delete_with_volume_in_use(self):
         """Test create/delete snapshot from volume attached to server"""
         # Create a test instance
-        server = self.create_server()
+        server = self.create_server(wait_until='SSHABLE')
         # NOTE(zhufl) Here we create volume from self.image_ref for adding
         # coverage for "creating snapshot from non-blank volume".
         volume = self.create_volume(imageRef=self.image_ref)
@@ -80,7 +80,7 @@
         snapshot1 = self.create_snapshot(self.volume_origin['id'])
 
         # Create a server and attach it
-        server = self.create_server()
+        server = self.create_server(wait_until='SSHABLE')
         self.attach_volume(server['id'], self.volume_origin['id'])
 
         # Now that the volume is attached, create other snapshots
diff --git a/tempest/clients.py b/tempest/clients.py
index 4c3d875..1aa34d0 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -87,6 +87,7 @@
             self.image_member_client = self.image_v1.ImageMembersClient()
             self.image_client_v2 = self.image_v2.ImagesClient()
             self.image_member_client_v2 = self.image_v2.ImageMembersClient()
+            self.image_cache_client = self.image_v2.ImageCacheClient()
             self.namespaces_client = self.image_v2.NamespacesClient()
             self.resource_types_client = self.image_v2.ResourceTypesClient()
             self.namespace_objects_client = \
@@ -117,7 +118,6 @@
             enable_instance_password=eip)
         self.server_groups_client = self.compute.ServerGroupsClient()
         self.limits_client = self.compute.LimitsClient()
-        self.compute_images_client = self.compute.ImagesClient()
         self.keypairs_client = self.compute.KeyPairsClient(
             ssh_key_type=CONF.validation.ssh_key_type)
         self.quotas_client = self.compute.QuotasClient()
@@ -144,6 +144,8 @@
         self.tenant_networks_client = self.compute.TenantNetworksClient()
         self.assisted_volume_snapshots_client = (
             self.compute.AssistedVolumeSnapshotsClient())
+        self.server_external_events_client = (
+            self.compute.ServerExternalEventsClient())
 
         # NOTE: The following client needs special timeout values because
         # the API is a proxy for the other component.
@@ -157,6 +159,8 @@
             **params_volume)
         self.snapshots_extensions_client = self.compute.SnapshotsClient(
             **params_volume)
+        self.compute_images_client = self.compute.ImagesClient(
+            build_timeout=CONF.image.build_timeout)
 
     def _set_placement_clients(self):
         self.placement_client = self.placement.PlacementClient()
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index eb7e366..be8766d 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -209,13 +209,6 @@
                                    kwargs.get('max_count', 0)) > 1)
 
     if CONF.validation.run_validation and validatable:
-        # As a first implementation, multiple pingable or sshable servers will
-        # not be supported
-        if multiple_create_request:
-            msg = ("Multiple pingable or sshable servers not supported at "
-                   "this stage.")
-            raise ValueError(msg)
-
         LOG.debug("Provisioning test server with validation resources %s",
                   validation_resources)
         if 'security_groups' in kwargs:
@@ -298,11 +291,11 @@
     if multiple_create_request:
         # Get servers created which name match with name param.
         body_servers = clients.servers_client.list_servers()
-        servers = \
+        created_servers = \
             [s for s in body_servers['servers'] if s['name'].startswith(name)]
     else:
         body = rest_client.ResponseBody(body.response, body['server'])
-        servers = [body]
+        created_servers = [body]
 
     if wait_until:
 
@@ -314,11 +307,19 @@
             wait_until_extra = wait_until
             wait_until = 'ACTIVE'
 
-        for server in servers:
-            try:
-                waiters.wait_for_server_status(
+        servers = []
+        try:
+            # Wait for server to be in active state and populate servers list
+            # with those full server response so that we will have addresses
+            # field present in server which is needed to be used for wait for
+            # ssh
+            for server in created_servers:
+                server = waiters.wait_for_server_status(
                     clients.servers_client, server['id'], wait_until,
                     request_id=request_id)
+                servers.append(server)
+
+            for server in servers:
                 if CONF.validation.run_validation and validatable:
                     if CONF.validation.connect_method == 'floating':
                         _setup_validation_fip(
@@ -329,31 +330,33 @@
                             server, clients, tenant_network,
                             validatable, validation_resources,
                             wait_until_extra, False)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                for server in created_servers:
+                    try:
+                        clients.servers_client.delete_server(
+                            server['id'])
+                    except Exception:
+                        LOG.exception('Deleting server %s failed',
+                                      server['id'])
+                for server in created_servers:
+                    # NOTE(artom) If the servers were booted with volumes
+                    # and with delete_on_termination=False we need to wait
+                    # for the servers to go away before proceeding with
+                    # cleanup, otherwise we'll attempt to delete the
+                    # volumes while they're still attached to servers that
+                    # are in the process of being deleted.
+                    try:
+                        waiters.wait_for_server_termination(
+                            clients.servers_client, server['id'])
+                    except Exception:
+                        LOG.exception('Server %s failed to delete in time',
+                                      server['id'])
+        if servers and not multiple_create_request:
+            body = rest_client.ResponseBody(body.response, servers[0])
+        return body, servers
 
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    for server in servers:
-                        try:
-                            clients.servers_client.delete_server(
-                                server['id'])
-                        except Exception:
-                            LOG.exception('Deleting server %s failed',
-                                          server['id'])
-                    for server in servers:
-                        # NOTE(artom) If the servers were booted with volumes
-                        # and with delete_on_termination=False we need to wait
-                        # for the servers to go away before proceeding with
-                        # cleanup, otherwise we'll attempt to delete the
-                        # volumes while they're still attached to servers that
-                        # are in the process of being deleted.
-                        try:
-                            waiters.wait_for_server_termination(
-                                clients.servers_client, server['id'])
-                        except Exception:
-                            LOG.exception('Server %s failed to delete in time',
-                                          server['id'])
-
-    return body, servers
+    return body, created_servers
 
 
 def shelve_server(servers_client, server_id, force_shelve_offload=False):
diff --git a/tempest/common/utils/net_downtime.py b/tempest/common/utils/net_downtime.py
new file mode 100644
index 0000000..9675ec8
--- /dev/null
+++ b/tempest/common/utils/net_downtime.py
@@ -0,0 +1,63 @@
+# Copyright 2022 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import signal
+import subprocess
+
+import fixtures
+
+from oslo_log import log
+
+
+LOG = log.getLogger(__name__)
+
+
+class NetDowntimeMeter(fixtures.Fixture):
+    def __init__(self, dest_ip, interval='0.2'):
+        self.dest_ip = dest_ip
+        # Note: for intervals lower than 0.2 ping requires root privileges
+        self.interval = interval
+        self.ping_process = None
+
+    def _setUp(self):
+        self.start_background_pinger()
+
+    def start_background_pinger(self):
+        cmd = ['ping', '-q', '-s1']
+        cmd.append('-i{}'.format(self.interval))
+        cmd.append(self.dest_ip)
+        LOG.debug("Starting background pinger to '{}' with interval {}".format(
+            self.dest_ip, self.interval))
+        self.ping_process = subprocess.Popen(
+            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        self.addCleanup(self.cleanup)
+
+    def cleanup(self):
+        if self.ping_process and self.ping_process.poll() is None:
+            LOG.debug('Terminating background pinger with pid {}'.format(
+                self.ping_process.pid))
+            self.ping_process.terminate()
+        self.ping_process = None
+
+    def get_downtime(self):
+        self.ping_process.send_signal(signal.SIGQUIT)
+        # Example of the expected output:
+        # 264/274 packets, 3% loss
+        output = self.ping_process.stderr.readline().strip().decode('utf-8')
+        if output and len(output.split()[0].split('/')) == 2:
+            succ, total = output.split()[0].split('/')
+            return (int(total) - int(succ)) * float(self.interval)
+        else:
+            LOG.warning('Unexpected output obtained from the pinger: %s',
+                        output)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index ab401fb..45a7b8a 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -49,19 +49,19 @@
         # between the UNKNOWN->ACTIVE transition.
         # TODO(afazekas): enumerate and validate the stable status set
         if status == 'BUILD' and server_status != 'UNKNOWN':
-            return
+            return body
         if server_status == status:
             if ready_wait:
                 if status == 'BUILD':
-                    return
+                    return body
                 # NOTE(afazekas): The instance is in "ready for action state"
                 # when no task in progress
                 if task_state is None:
                     # without state api extension 3 sec usually enough
                     time.sleep(CONF.compute.ready_wait)
-                    return
+                    return body
             else:
-                return
+                return body
 
         time.sleep(client.build_interval)
         body = client.show_server(server_id)['server']
@@ -233,6 +233,26 @@
 
     exc_cls = lib_exc.TimeoutException
     start = int(time.time())
+
+    # NOTE(danms): Don't wait for stores that are read-only as those
+    # will never complete
+    try:
+        store_info = client.info_stores()['stores']
+        stores = ','.join(sorted([
+            store['id'] for store in store_info
+            if store.get('read-only') != 'true' and
+            (not stores or store['id'] in stores.split(','))]))
+    except lib_exc.NotFound:
+        # If multi-store is not enabled, then we can not resolve which
+        # ones are read-only, and stores must have been passed as None
+        # anyway for us to succeed. If not, then we should raise right
+        # now and avoid waiting since we will never see the stores
+        # appear.
+        if stores is not None:
+            raise lib_exc.TimeoutException(
+                'Image service has no store support; '
+                'cowardly refusing to wait for them.')
+
     while int(time.time()) - start < client.build_timeout:
         image = client.show_image(image_id)
         if image['status'] == 'active' and (stores is None or
@@ -584,6 +604,22 @@
     raise lib_exc.TimeoutException()
 
 
+def wait_for_port_status(client, port_id, status):
+    """Wait for a port to reach a certain status: ["BUILD" | "DOWN" | "ACTIVE"]
+    :param client: The network client to use when querying the port's
+    status
+    :param status: A string to compare the current port status to.
+    :param port_id: The uuid of the port we would like queried for status.
+    """
+    start_time = time.time()
+    while (time.time() - start_time <= client.build_timeout):
+        result = client.show_port(port_id)
+        if result['port']['status'].lower() == status.lower():
+            return result
+        time.sleep(client.build_interval)
+    raise lib_exc.TimeoutException
+
+
 def wait_for_ssh(ssh_client, timeout=30):
     """Waits for SSH connection to become usable"""
     start_time = int(time.time())
@@ -594,3 +630,35 @@
         except lib_exc.SSHTimeout:
             pass
     raise lib_exc.TimeoutException()
+
+
+def wait_for_caching(client, cache_client, image_id):
+    """Waits until image is cached"""
+    start = int(time.time())
+    while int(time.time()) - start < client.build_timeout:
+        caching = cache_client.list_cache()
+        output = [image['image_id'] for image in caching['cached_images']]
+        if output and image_id in output:
+            return caching
+
+        time.sleep(client.build_interval)
+
+    message = ('Image %s failed to cache in time.' % image_id)
+    caller = test_utils.find_test_caller()
+    if caller:
+        message = '(%s) %s' % (caller, message)
+    raise lib_exc.TimeoutException(message)
+
+
+def wait_for_object_create(object_client, container_name, object_name,
+                           interval=1):
+    """Waits for created object to become available"""
+    start_time = time.time()
+    while time.time() - start_time < object_client.build_timeout:
+        try:
+            return object_client.get_object(container_name, object_name)
+        except lib_exc.NotFound:
+            time.sleep(interval)
+    message = ('Object %s failed to create within the required time (%s s).' %
+               (object_name, object_client.build_timeout))
+    raise lib_exc.TimeoutException(message)
diff --git a/tempest/config.py b/tempest/config.py
index 2fb43a0..d91fca4 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -674,6 +674,11 @@
                         'publicURL', 'adminURL', 'internalURL'],
                help=("The endpoint type to use for the alternate image "
                      "service.")),
+    cfg.BoolOpt('image_caching_enabled',
+                default=False,
+                help=("Flag to enable if caching is enabled by image "
+                      "service, operator should set this parameter to True "
+                      "if 'image_cache_dir' is set in glance-api.conf")),
     cfg.StrOpt('http_image',
                default='http://download.cirros-cloud.net/0.3.1/'
                'cirros-0.3.1-x86_64-uec.tar.gz',
@@ -730,6 +735,11 @@
     cfg.BoolOpt('os_glance_reserved',
                 default=False,
                 help="Should we check that os_glance namespace is reserved"),
+    cfg.BoolOpt('manage_locations',
+                default=False,
+                help=('Is show_multiple_locations enabled in glance. '
+                      'Note that at least one http store must be enabled as '
+                      'well, because we use that location scheme to test.')),
 ]
 
 network_group = cfg.OptGroup(name='network',
@@ -962,9 +972,15 @@
                help="Network used for SSH connections. Ignored if "
                     "connect_method=floating."),
     cfg.StrOpt('ssh_key_type',
-               default='rsa',
+               default='ecdsa',
                help='Type of key to use for ssh connections. '
                     'Valid types are rsa, ecdsa'),
+    cfg.FloatOpt('allowed_network_downtime',
+               default=5.0,
+               help="Allowed VM network connection downtime during live "
+                    "migration, in seconds. "
+                    "When the measured downtime exceeds this value, an "
+                    "exception is raised."),
 ]
 
 volume_group = cfg.OptGroup(name='volume',
@@ -1137,6 +1153,9 @@
                help="One name of cluster which is set in the realm whose name "
                     "is set in 'realm_name' item in this file. Set the "
                     "same cluster name as Swift's container-sync-realms.conf"),
+    cfg.IntOpt('build_timeout',
+               default=10,
+               help="Timeout in seconds to wait for objects to be created."),
 ]
 
 object_storage_feature_group = cfg.OptGroup(
@@ -1158,6 +1177,11 @@
     cfg.BoolOpt('discoverability',
                 default=True,
                 help="Execute discoverability tests"),
+    cfg.StrOpt('tempurl_digest_hashlib',
+               default='sha256',
+               help="Hashing algorithm to use for the temp_url tests. "
+                    "Needs to be supported both by Swift and the "
+                    "hashlib module, for example sha1 or sha256"),
 ]
 
 
@@ -1223,29 +1247,46 @@
 EnforceScopeGroup = [
     cfg.BoolOpt('nova',
                 default=False,
-                help='Does the compute service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'nova.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the compute service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when nova.conf: [oslo_policy]. '
+                     'enforce_new_defaults and nova.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in nova conf.'),
     cfg.BoolOpt('neutron',
                 default=False,
-                help='Does the network service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'neutron.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the network service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when neutron.conf: [oslo_policy]. '
+                     'enforce_new_defaults and neutron.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in neutron conf.'),
     cfg.BoolOpt('glance',
                 default=False,
-                help='Does the Image service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'glance.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the Image service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when glance.conf: [oslo_policy]. '
+                     'enforce_new_defaults and glance.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in glance conf.'),
     cfg.BoolOpt('cinder',
                 default=False,
-                help='Does the Volume service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'cinder.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the Volume service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when cinder.conf: [oslo_policy]. '
+                     'enforce_new_defaults and cinder.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in cinder conf.'),
     cfg.BoolOpt('keystone',
                 default=False,
-                help='Does the Identity service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'keystone.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the Identity service API policies enforce scope '
+                     'and new defaults? This configuration value should be '
+                     'enabled when keystone.conf: [oslo_policy]. '
+                     'enforce_new_defaults and keystone.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in keystone conf.'),
+    cfg.BoolOpt('placement',
+                default=False,
+                help='Does the placement service API policies enforce scope '
+                     'and new defaults? This configuration value should be '
+                     'enabled when placement.conf: [oslo_policy]. '
+                     'enforce_new_defaults and nova.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in placement conf.'),
 ]
 
 debug_group = cfg.OptGroup(name="debug",
diff --git a/tempest/lib/api_schema/response/compute/v2_1/server_external_events.py b/tempest/lib/api_schema/response/compute/v2_1/server_external_events.py
new file mode 100644
index 0000000..2ab69e2
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_1/server_external_events.py
@@ -0,0 +1,55 @@
+# Copyright 2022 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+create = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'events': {
+                'type': 'array', 'minItems': 1,
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'server_uuid': {
+                            'type': 'string', 'format': 'uuid'
+                        },
+                        'name': {
+                            'type': 'string',
+                            'enum': [
+                                'network-changed',
+                                'network-vif-plugged',
+                                'network-vif-unplugged',
+                                'network-vif-deleted'
+                            ],
+                        },
+                        'status': {
+                            'type': 'string',
+                            'enum': ['failed', 'completed', 'in-progress'],
+                        },
+                        'tag': {
+                            'type': 'string', 'maxLength': 255,
+                        },
+                        'code': {'type': 'integer'},
+                    },
+                    'required': [
+                        'server_uuid', 'name', 'code'],
+                    'additionalProperties': False,
+                },
+            },
+        },
+        'required': ['events'],
+        'additionalProperties': False,
+    }
+}
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/volume/v3_55/__init__.py
similarity index 100%
rename from tempest/services/__init__.py
rename to tempest/lib/api_schema/response/volume/v3_55/__init__.py
diff --git a/tempest/lib/api_schema/response/volume/v3_55/transfers.py b/tempest/lib/api_schema/response/volume/v3_55/transfers.py
new file mode 100644
index 0000000..683c62f
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_55/transfers.py
@@ -0,0 +1,46 @@
+# Copyright 2022 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.volume import transfers
+
+# Volume microversion 3.55:
+# Add 'no_snapshots' attribute in 'transfer' responses.
+
+create_volume_transfer = copy.deepcopy(transfers.create_volume_transfer)
+create_volume_transfer['response_body']['properties']['transfer'][
+    'properties'].update({'no_snapshots': {'type': 'boolean'}})
+
+common_show_volume_transfer = copy.deepcopy(
+    transfers.common_show_volume_transfer)
+common_show_volume_transfer['properties'].update(
+    {'no_snapshots': {'type': 'boolean'}})
+
+show_volume_transfer = copy.deepcopy(transfers.show_volume_transfer)
+show_volume_transfer['response_body']['properties'][
+    'transfer'] = common_show_volume_transfer
+
+list_volume_transfers_no_detail = copy.deepcopy(
+    transfers.list_volume_transfers_no_detail)
+
+list_volume_transfers_with_detail = copy.deepcopy(
+    transfers.list_volume_transfers_with_detail)
+list_volume_transfers_with_detail['response_body']['properties']['transfers'][
+    'items'] = common_show_volume_transfer
+
+delete_volume_transfer = copy.deepcopy(transfers.delete_volume_transfer)
+
+accept_volume_transfer = copy.deepcopy(transfers.accept_volume_transfer)
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/volume/v3_57/__init__.py
similarity index 100%
copy from tempest/services/__init__.py
copy to tempest/lib/api_schema/response/volume/v3_57/__init__.py
diff --git a/tempest/lib/api_schema/response/volume/v3_57/transfers.py b/tempest/lib/api_schema/response/volume/v3_57/transfers.py
new file mode 100644
index 0000000..2fcf0aa
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_57/transfers.py
@@ -0,0 +1,61 @@
+# Copyright 2022 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.volume.v3_55 import transfers
+
+# Volume microversion 3.57:
+# Add these attributes in 'transfer' responses.
+#   'destination_project_id'
+#   'source_project_id'
+#   'accepted'
+
+create_volume_transfer = copy.deepcopy(transfers.create_volume_transfer)
+create_volume_transfer['response_body']['properties']['transfer'][
+    'properties'].update(
+        {'destination_project_id': parameter_types.uuid_or_null})
+create_volume_transfer['response_body']['properties']['transfer'][
+    'properties'].update(
+        {'source_project_id': {'type': 'string', 'format': 'uuid'}})
+create_volume_transfer['response_body']['properties']['transfer'][
+    'properties'].update(
+        {'accepted': {'type': 'boolean'}})
+
+common_show_volume_transfer = copy.deepcopy(
+    transfers.common_show_volume_transfer)
+common_show_volume_transfer['properties'].update(
+    {'destination_project_id': parameter_types.uuid_or_null})
+common_show_volume_transfer['properties'].update(
+    {'source_project_id': {'type': 'string', 'format': 'uuid'}})
+common_show_volume_transfer['properties'].update(
+    {'accepted': {'type': 'boolean'}})
+
+show_volume_transfer = copy.deepcopy(transfers.show_volume_transfer)
+show_volume_transfer['response_body']['properties'][
+    'transfer'] = common_show_volume_transfer
+
+list_volume_transfers_no_detail = copy.deepcopy(
+    transfers.list_volume_transfers_no_detail)
+
+list_volume_transfers_with_detail = copy.deepcopy(
+    transfers.list_volume_transfers_with_detail)
+list_volume_transfers_with_detail['response_body']['properties']['transfers'][
+    'items'] = common_show_volume_transfer
+
+delete_volume_transfer = copy.deepcopy(transfers.delete_volume_transfer)
+
+accept_volume_transfer = copy.deepcopy(transfers.accept_volume_transfer)
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/volume/v3_61/__init__.py
similarity index 100%
copy from tempest/services/__init__.py
copy to tempest/lib/api_schema/response/volume/v3_61/__init__.py
diff --git a/tempest/lib/api_schema/response/volume/v3_61/volumes.py b/tempest/lib/api_schema/response/volume/v3_61/volumes.py
new file mode 100644
index 0000000..2e28b7e
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_61/volumes.py
@@ -0,0 +1,69 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.volume import volumes
+
+# Volume micro version 3.61:
+# 1. Add cluster_name attribute to response body of volume details
+# for admin in Active/Active HA mode.
+# https://docs.openstack.org/cinder/latest/contributor/
+# api_microversion_history.html
+
+common_show_volume = copy.deepcopy(volumes.common_show_volume)
+common_show_volume['properties'].update(
+    {'cluster_name': parameter_types.uuid_or_null})
+
+create_volume = copy.deepcopy(volumes.create_volume)
+create_volume['response_body']['properties']['volume']['properties'].update(
+    {'cluster_name': parameter_types.uuid_or_null})
+
+# copy unchanged volumes schema
+attachments = copy.deepcopy(volumes.attachments)
+list_volumes_no_detail = copy.deepcopy(volumes.list_volumes_no_detail)
+# show_volume refers to common_show_volume
+show_volume = copy.deepcopy(volumes.show_volume)
+show_volume['response_body']['properties']['volume'] = common_show_volume
+# list copy refers to latest common_show_volume
+list_volumes_detail = copy.deepcopy(common_show_volume)
+list_volumes_with_detail = copy.deepcopy(volumes.list_volumes_with_detail)
+list_volumes_with_detail['response_body']['properties']['volumes']['items'] \
+    = list_volumes_detail
+update_volume = copy.deepcopy(volumes.update_volume)
+delete_volume = copy.deepcopy(volumes.delete_volume)
+show_volume_summary = copy.deepcopy(volumes.show_volume_summary)
+attach_volume = copy.deepcopy(volumes.attach_volume)
+set_bootable_volume = copy.deepcopy(volumes.set_bootable_volume)
+detach_volume = copy.deepcopy(volumes.detach_volume)
+reserve_volume = copy.deepcopy(volumes.reserve_volume)
+unreserve_volume = copy.deepcopy(volumes.unreserve_volume)
+extend_volume = copy.deepcopy(volumes.extend_volume)
+reset_volume_status = copy.deepcopy(volumes.reset_volume_status)
+update_volume_readonly = copy.deepcopy(volumes.update_volume_readonly)
+force_delete_volume = copy.deepcopy(volumes.force_delete_volume)
+retype_volume = copy.deepcopy(volumes.retype_volume)
+force_detach_volume = copy.deepcopy(volumes.force_detach_volume)
+create_volume_metadata = copy.deepcopy(volumes.create_volume_metadata)
+show_volume_metadata = copy.deepcopy(volumes.show_volume_metadata)
+update_volume_metadata = copy.deepcopy(volumes.update_volume_metadata)
+update_volume_metadata_item = copy.deepcopy(
+    volumes.update_volume_metadata_item)
+update_volume_image_metadata = copy.deepcopy(
+    volumes.update_volume_image_metadata)
+delete_volume_image_metadata = copy.deepcopy(
+    volumes.delete_volume_image_metadata)
+unmanage_volume = copy.deepcopy(volumes.unmanage_volume)
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/volume/v3_63/__init__.py
similarity index 100%
copy from tempest/services/__init__.py
copy to tempest/lib/api_schema/response/volume/v3_63/__init__.py
diff --git a/tempest/lib/api_schema/response/volume/v3_63/volumes.py b/tempest/lib/api_schema/response/volume/v3_63/volumes.py
new file mode 100644
index 0000000..218db90
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_63/volumes.py
@@ -0,0 +1,69 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.volume.v3_61 import volumes
+
+# Volume micro version 3.63:
+# 1. Includes volume type ID in the volume-show and volume-detail-list.
+# for admin in Active/Active HA mode.
+# https://docs.openstack.org/cinder/latest/contributor/
+# api_microversion_history.html
+
+common_show_volume = copy.deepcopy(volumes.common_show_volume)
+common_show_volume['properties'].update(
+    {'volume_type_id': parameter_types.uuid_or_null})
+
+create_volume = copy.deepcopy(volumes.create_volume)
+create_volume['response_body']['properties']['volume']['properties'].update(
+    {'volume_type_id': parameter_types.uuid_or_null})
+
+# copy unchanged volumes schema
+attachments = copy.deepcopy(volumes.attachments)
+list_volumes_no_detail = copy.deepcopy(volumes.list_volumes_no_detail)
+# show_volume refers to common_show_volume
+show_volume = copy.deepcopy(volumes.show_volume)
+show_volume['response_body']['properties']['volume'] = common_show_volume
+# list copy refers to latest common_show_volume
+list_volumes_detail = copy.deepcopy(common_show_volume)
+list_volumes_with_detail = copy.deepcopy(volumes.list_volumes_with_detail)
+list_volumes_with_detail['response_body']['properties']['volumes']['items'] \
+    = list_volumes_detail
+update_volume = copy.deepcopy(volumes.update_volume)
+delete_volume = copy.deepcopy(volumes.delete_volume)
+show_volume_summary = copy.deepcopy(volumes.show_volume_summary)
+attach_volume = copy.deepcopy(volumes.attach_volume)
+set_bootable_volume = copy.deepcopy(volumes.set_bootable_volume)
+detach_volume = copy.deepcopy(volumes.detach_volume)
+reserve_volume = copy.deepcopy(volumes.reserve_volume)
+unreserve_volume = copy.deepcopy(volumes.unreserve_volume)
+extend_volume = copy.deepcopy(volumes.extend_volume)
+reset_volume_status = copy.deepcopy(volumes.reset_volume_status)
+update_volume_readonly = copy.deepcopy(volumes.update_volume_readonly)
+force_delete_volume = copy.deepcopy(volumes.force_delete_volume)
+retype_volume = copy.deepcopy(volumes.retype_volume)
+force_detach_volume = copy.deepcopy(volumes.force_detach_volume)
+create_volume_metadata = copy.deepcopy(volumes.create_volume_metadata)
+show_volume_metadata = copy.deepcopy(volumes.show_volume_metadata)
+update_volume_metadata = copy.deepcopy(volumes.update_volume_metadata)
+update_volume_metadata_item = copy.deepcopy(
+    volumes.update_volume_metadata_item)
+update_volume_image_metadata = copy.deepcopy(
+    volumes.update_volume_image_metadata)
+delete_volume_image_metadata = copy.deepcopy(
+    volumes.delete_volume_image_metadata)
+unmanage_volume = copy.deepcopy(volumes.unmanage_volume)
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/volume/v3_64/__init__.py
similarity index 100%
copy from tempest/services/__init__.py
copy to tempest/lib/api_schema/response/volume/v3_64/__init__.py
diff --git a/tempest/lib/api_schema/response/volume/v3_64/backups.py b/tempest/lib/api_schema/response/volume/v3_64/backups.py
new file mode 100644
index 0000000..01b93bc
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_64/backups.py
@@ -0,0 +1,48 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.volume import backups
+
+# Volume micro version 3.64:
+# 1. Include the encryption_key_id in volume and backup
+# details when the associated volume is encrypted.
+# https://docs.openstack.org/cinder/latest/contributor/
+# api_microversion_history.html
+
+common_show_backup = copy.deepcopy(backups.common_show_backup)
+common_show_backup['properties'].update(
+    {'encryption_key_id': parameter_types.uuid_or_null})
+
+create_backup = copy.deepcopy(backups.create_backup)
+update_backup = copy.deepcopy(backups.update_backup)
+restore_backup = copy.deepcopy(backups.restore_backup)
+delete_backup = copy.deepcopy(backups.delete_backup)
+# show backup refers to common_show_backup
+show_backup = copy.deepcopy(backups.show_backup)
+show_backup['response_body']['properties']['backup'] = common_show_backup
+list_backups_no_detail = copy.deepcopy(backups.list_backups_no_detail)
+# list_backups_detail refers to latest common_show_backup
+list_backups_detail = copy.deepcopy(common_show_backup)
+list_backups_detail['properties'].update({'count': {'type': 'integer'}})
+list_backups_with_detail = copy.deepcopy(backups.list_backups_with_detail)
+# list_backups_with_detail refers to latest list_backups_detail
+list_backups_with_detail['response_body']['properties']['backups']['items'] =\
+    list_backups_detail
+export_backup = copy.deepcopy(backups.export_backup)
+import_backup = copy.deepcopy(backups.import_backup)
+reset_backup_status = copy.deepcopy(backups.reset_backup_status)
diff --git a/tempest/lib/api_schema/response/volume/v3_64/volumes.py b/tempest/lib/api_schema/response/volume/v3_64/volumes.py
new file mode 100644
index 0000000..0fbbb3f
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_64/volumes.py
@@ -0,0 +1,69 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.volume.v3_63 import volumes
+
+# Volume micro version 3.64:
+# 1. Include the encryption_key_id in volume and backup
+# details when the associated volume is encrypted.
+# https://docs.openstack.org/cinder/latest/contributor/
+# api_microversion_history.html
+
+common_show_volume = copy.deepcopy(volumes.common_show_volume)
+common_show_volume['properties'].update(
+    {'encryption_key_id': parameter_types.uuid_or_null})
+
+create_volume = copy.deepcopy(volumes.create_volume)
+create_volume['response_body']['properties']['volume']['properties'].update(
+    {'encryption_key_id': parameter_types.uuid_or_null})
+
+# copy unchanged volumes schema
+attachments = copy.deepcopy(volumes.attachments)
+list_volumes_no_detail = copy.deepcopy(volumes.list_volumes_no_detail)
+# show_volume refers to common_show_volume
+show_volume = copy.deepcopy(volumes.show_volume)
+show_volume['response_body']['properties']['volume'] = common_show_volume
+# list_volumes_detail refers to latest common_show_volume
+list_volumes_detail = copy.deepcopy(common_show_volume)
+list_volumes_with_detail = copy.deepcopy(volumes.list_volumes_with_detail)
+list_volumes_with_detail['response_body']['properties']['volumes']['items'] \
+    = list_volumes_detail
+update_volume = copy.deepcopy(volumes.update_volume)
+delete_volume = copy.deepcopy(volumes.delete_volume)
+show_volume_summary = copy.deepcopy(volumes.show_volume_summary)
+attach_volume = copy.deepcopy(volumes.attach_volume)
+set_bootable_volume = copy.deepcopy(volumes.set_bootable_volume)
+detach_volume = copy.deepcopy(volumes.detach_volume)
+reserve_volume = copy.deepcopy(volumes.reserve_volume)
+unreserve_volume = copy.deepcopy(volumes.unreserve_volume)
+extend_volume = copy.deepcopy(volumes.extend_volume)
+reset_volume_status = copy.deepcopy(volumes.reset_volume_status)
+update_volume_readonly = copy.deepcopy(volumes.update_volume_readonly)
+force_delete_volume = copy.deepcopy(volumes.force_delete_volume)
+retype_volume = copy.deepcopy(volumes.retype_volume)
+force_detach_volume = copy.deepcopy(volumes.force_detach_volume)
+create_volume_metadata = copy.deepcopy(volumes.create_volume_metadata)
+show_volume_metadata = copy.deepcopy(volumes.show_volume_metadata)
+update_volume_metadata = copy.deepcopy(volumes.update_volume_metadata)
+update_volume_metadata_item = copy.deepcopy(
+    volumes.update_volume_metadata_item)
+update_volume_image_metadata = copy.deepcopy(
+    volumes.update_volume_image_metadata)
+delete_volume_image_metadata = copy.deepcopy(
+    volumes.delete_volume_image_metadata)
+unmanage_volume = copy.deepcopy(volumes.unmanage_volume)
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/volume/v3_65/__init__.py
similarity index 100%
copy from tempest/services/__init__.py
copy to tempest/lib/api_schema/response/volume/v3_65/__init__.py
diff --git a/tempest/lib/api_schema/response/volume/v3_65/volumes.py b/tempest/lib/api_schema/response/volume/v3_65/volumes.py
new file mode 100644
index 0000000..f7d9e1b
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_65/volumes.py
@@ -0,0 +1,65 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.volume.v3_64 import volumes
+
+# Volume microversion 3.65:
+# Add 'consumes_quota' attribute in volume details.
+
+common_show_volume = copy.deepcopy(volumes.common_show_volume)
+common_show_volume['properties'].update(
+    {'consumes_quota': {'type': 'boolean'}})
+
+create_volume = copy.deepcopy(volumes.create_volume)
+create_volume['response_body']['properties']['volume']['properties'].update(
+    {'consumes_quota': {'type': 'boolean'}})
+
+# copy unchanged volumes schema
+attachments = copy.deepcopy(volumes.attachments)
+list_volumes_no_detail = copy.deepcopy(volumes.list_volumes_no_detail)
+# show_volume refers to common_show_volume
+show_volume = copy.deepcopy(volumes.show_volume)
+show_volume['response_body']['properties']['volume'] = common_show_volume
+# list_volumes_detail refers to latest common_show_volume
+list_volumes_detail = copy.deepcopy(common_show_volume)
+list_volumes_with_detail = copy.deepcopy(volumes.list_volumes_with_detail)
+list_volumes_with_detail['response_body']['properties']['volumes']['items'] \
+    = list_volumes_detail
+update_volume = copy.deepcopy(volumes.update_volume)
+delete_volume = copy.deepcopy(volumes.delete_volume)
+show_volume_summary = copy.deepcopy(volumes.show_volume_summary)
+attach_volume = copy.deepcopy(volumes.attach_volume)
+set_bootable_volume = copy.deepcopy(volumes.set_bootable_volume)
+detach_volume = copy.deepcopy(volumes.detach_volume)
+reserve_volume = copy.deepcopy(volumes.reserve_volume)
+unreserve_volume = copy.deepcopy(volumes.unreserve_volume)
+extend_volume = copy.deepcopy(volumes.extend_volume)
+reset_volume_status = copy.deepcopy(volumes.reset_volume_status)
+update_volume_readonly = copy.deepcopy(volumes.update_volume_readonly)
+force_delete_volume = copy.deepcopy(volumes.force_delete_volume)
+retype_volume = copy.deepcopy(volumes.retype_volume)
+force_detach_volume = copy.deepcopy(volumes.force_detach_volume)
+create_volume_metadata = copy.deepcopy(volumes.create_volume_metadata)
+show_volume_metadata = copy.deepcopy(volumes.show_volume_metadata)
+update_volume_metadata = copy.deepcopy(volumes.update_volume_metadata)
+update_volume_metadata_item = copy.deepcopy(
+    volumes.update_volume_metadata_item)
+update_volume_image_metadata = copy.deepcopy(
+    volumes.update_volume_image_metadata)
+delete_volume_image_metadata = copy.deepcopy(
+    volumes.delete_volume_image_metadata)
+unmanage_volume = copy.deepcopy(volumes.unmanage_volume)
diff --git a/tempest/services/__init__.py b/tempest/lib/api_schema/response/volume/v3_69/__init__.py
similarity index 100%
copy from tempest/services/__init__.py
copy to tempest/lib/api_schema/response/volume/v3_69/__init__.py
diff --git a/tempest/lib/api_schema/response/volume/v3_69/volumes.py b/tempest/lib/api_schema/response/volume/v3_69/volumes.py
new file mode 100644
index 0000000..e83ef46
--- /dev/null
+++ b/tempest/lib/api_schema/response/volume/v3_69/volumes.py
@@ -0,0 +1,65 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.volume.v3_65 import volumes
+
+# Volume microversion 3.69:
+# The 'shared_targets' attribute is now a tristate boolean.
+
+common_show_volume = copy.deepcopy(volumes.common_show_volume)
+common_show_volume['properties'].update(
+    {'shared_targets': {'type': ['boolean', 'null']}})
+
+create_volume = copy.deepcopy(volumes.create_volume)
+create_volume['response_body']['properties']['volume']['properties'].update(
+    {'shared_targets': {'type': ['boolean', 'null']}})
+
+# copy unchanged volumes schema
+attachments = copy.deepcopy(volumes.attachments)
+list_volumes_no_detail = copy.deepcopy(volumes.list_volumes_no_detail)
+# show_volume refers to common_show_volume
+show_volume = copy.deepcopy(volumes.show_volume)
+show_volume['response_body']['properties']['volume'] = common_show_volume
+# list_volumes_detail refers to latest common_show_volume
+list_volumes_detail = copy.deepcopy(common_show_volume)
+list_volumes_with_detail = copy.deepcopy(volumes.list_volumes_with_detail)
+list_volumes_with_detail['response_body']['properties']['volumes']['items'] \
+    = list_volumes_detail
+update_volume = copy.deepcopy(volumes.update_volume)
+delete_volume = copy.deepcopy(volumes.delete_volume)
+show_volume_summary = copy.deepcopy(volumes.show_volume_summary)
+attach_volume = copy.deepcopy(volumes.attach_volume)
+set_bootable_volume = copy.deepcopy(volumes.set_bootable_volume)
+detach_volume = copy.deepcopy(volumes.detach_volume)
+reserve_volume = copy.deepcopy(volumes.reserve_volume)
+unreserve_volume = copy.deepcopy(volumes.unreserve_volume)
+extend_volume = copy.deepcopy(volumes.extend_volume)
+reset_volume_status = copy.deepcopy(volumes.reset_volume_status)
+update_volume_readonly = copy.deepcopy(volumes.update_volume_readonly)
+force_delete_volume = copy.deepcopy(volumes.force_delete_volume)
+retype_volume = copy.deepcopy(volumes.retype_volume)
+force_detach_volume = copy.deepcopy(volumes.force_detach_volume)
+create_volume_metadata = copy.deepcopy(volumes.create_volume_metadata)
+show_volume_metadata = copy.deepcopy(volumes.show_volume_metadata)
+update_volume_metadata = copy.deepcopy(volumes.update_volume_metadata)
+update_volume_metadata_item = copy.deepcopy(
+    volumes.update_volume_metadata_item)
+update_volume_image_metadata = copy.deepcopy(
+    volumes.update_volume_image_metadata)
+delete_volume_image_metadata = copy.deepcopy(
+    volumes.delete_volume_image_metadata)
+unmanage_volume = copy.deepcopy(volumes.unmanage_volume)
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index be8c0e8..d687eb5 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -559,23 +559,24 @@
             except lib_exc.NotFound:
                 LOG.warning("user with name: %s not found for delete",
                             creds.username)
-            # NOTE(zhufl): Only when neutron's security_group ext is
-            # enabled, cleanup_default_secgroup will not raise error. But
-            # here cannot use test_utils.is_extension_enabled for it will cause
-            # "circular dependency". So here just use try...except to
-            # ensure tenant deletion without big changes.
-            try:
-                if self.neutron_available:
-                    self.cleanup_default_secgroup(
-                        self.security_groups_admin_client, creds.tenant_id)
-            except lib_exc.NotFound:
-                LOG.warning("failed to cleanup tenant %s's secgroup",
-                            creds.tenant_name)
-            try:
-                self.creds_client.delete_project(creds.tenant_id)
-            except lib_exc.NotFound:
-                LOG.warning("tenant with name: %s not found for delete",
-                            creds.tenant_name)
+            if creds.tenant_id:
+                # NOTE(zhufl): Only when neutron's security_group ext is
+                # enabled, cleanup_default_secgroup will not raise error. But
+                # here cannot use test_utils.is_extension_enabled for it will
+                # cause "circular dependency". So here just use try...except to
+                # ensure tenant deletion without big changes.
+                try:
+                    if self.neutron_available:
+                        self.cleanup_default_secgroup(
+                            self.security_groups_admin_client, creds.tenant_id)
+                except lib_exc.NotFound:
+                    LOG.warning("failed to cleanup tenant %s's secgroup",
+                                creds.tenant_name)
+                try:
+                    self.creds_client.delete_project(creds.tenant_id)
+                except lib_exc.NotFound:
+                    LOG.warning("tenant with name: %s not found for delete",
+                                creds.tenant_name)
 
             # if cred is domain scoped, delete ephemeral domain
             # do not delete default domain
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index ef14dfc..a11b7c1 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -414,6 +414,11 @@
                 return resp[i]
         return ""
 
+    def _get_global_request_id(self, resp):
+        if 'x-openstack-request-id' in resp:
+            return resp['x-openstack-request-id']
+        return ''
+
     def _safe_body(self, body, maxlen=4096):
         # convert a structure into a string safely
         try:
@@ -461,7 +466,10 @@
         if req_headers is None:
             req_headers = {}
         # if we have the request id, put it in the right part of the log
-        extra = dict(request_id=self._get_request_id(resp))
+        extra = {
+            'request_id': self._get_request_id(resp),
+            'global_request_id': self._get_global_request_id(resp),
+        }
         # NOTE(sdague): while we still have 6 callers to this function
         # we're going to just provide work around on who is actually
         # providing timings by gracefully adding no content if they don't.
@@ -484,7 +492,7 @@
             self._log_request_full(resp, req_headers, req_body,
                                    resp_body, extra)
 
-    def _parse_resp(self, body):
+    def _parse_resp(self, body, top_key_to_verify=None):
         try:
             body = json.loads(body)
         except ValueError:
@@ -508,8 +516,17 @@
             if not hasattr(body, "keys") or len(body.keys()) != 1:
                 return body
             # Just return the "wrapped" element
-            _, first_item = tuple(body.items())[0]
+            first_key, first_item = tuple(body.items())[0]
             if isinstance(first_item, (dict, list)):
+                if top_key_to_verify is not None:
+                    msg_args = {
+                        'top_key': top_key_to_verify,
+                        'actual_key': first_key,
+                    }
+                    assert_msg = ("The expected top level key is "
+                                  "'%(top_key)s' but we found "
+                                  "'%(actual_key)s'." % msg_args)
+                    assert top_key_to_verify == first_key, assert_msg
                 return first_item
         except (ValueError, IndexError):
             pass
diff --git a/tempest/lib/services/compute/__init__.py b/tempest/lib/services/compute/__init__.py
index 8d07a45..da800af 100644
--- a/tempest/lib/services/compute/__init__.py
+++ b/tempest/lib/services/compute/__init__.py
@@ -52,6 +52,8 @@
     SecurityGroupRulesClient
 from tempest.lib.services.compute.security_groups_client import \
     SecurityGroupsClient
+from tempest.lib.services.compute.server_external_events_client \
+    import ServerExternalEventsClient
 from tempest.lib.services.compute.server_groups_client import \
     ServerGroupsClient
 from tempest.lib.services.compute.servers_client import ServersClient
@@ -75,6 +77,6 @@
            'MigrationsClient', 'NetworksClient', 'QuotaClassesClient',
            'QuotasClient', 'SecurityGroupDefaultRulesClient',
            'SecurityGroupRulesClient', 'SecurityGroupsClient',
-           'ServerGroupsClient', 'ServersClient', 'ServicesClient',
-           'SnapshotsClient', 'TenantNetworksClient', 'TenantUsagesClient',
-           'VersionsClient', 'VolumesClient']
+           'ServerExternalEventsClient', 'ServerGroupsClient', 'ServersClient',
+           'ServicesClient', 'SnapshotsClient', 'TenantNetworksClient',
+           'TenantUsagesClient', 'VersionsClient', 'VolumesClient']
diff --git a/tempest/lib/services/compute/server_external_events_client.py b/tempest/lib/services/compute/server_external_events_client.py
new file mode 100644
index 0000000..683dce1
--- /dev/null
+++ b/tempest/lib/services/compute/server_external_events_client.py
@@ -0,0 +1,36 @@
+# Copyright 2022 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.api_schema.response.compute.v2_1 import \
+    server_external_events as schema
+from tempest.lib.common import rest_client
+from tempest.lib.services.compute import base_compute_client
+
+
+class ServerExternalEventsClient(base_compute_client.BaseComputeClient):
+
+    def create_server_external_events(self, events):
+        """Create Server External Events.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/compute/#run-events
+        """
+        post_body = json.dumps({'events': events})
+        resp, body = self.post("os-server-external-events", post_body)
+        body = json.loads(body)
+        self.validate_response(schema.create, resp, body)
+        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/image/v2/__init__.py b/tempest/lib/services/image/v2/__init__.py
index 99a5321..a2f5bdc 100644
--- a/tempest/lib/services/image/v2/__init__.py
+++ b/tempest/lib/services/image/v2/__init__.py
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations under
 # the License.
 
+from tempest.lib.services.image.v2.image_cache_client import \
+    ImageCacheClient
 from tempest.lib.services.image.v2.image_members_client import \
     ImageMembersClient
 from tempest.lib.services.image.v2.images_client import ImagesClient
@@ -27,7 +29,7 @@
 from tempest.lib.services.image.v2.schemas_client import SchemasClient
 from tempest.lib.services.image.v2.versions_client import VersionsClient
 
-__all__ = ['ImageMembersClient', 'ImagesClient', 'NamespaceObjectsClient',
-           'NamespacePropertiesClient', 'NamespaceTagsClient',
-           'NamespacesClient', 'ResourceTypesClient', 'SchemasClient',
-           'VersionsClient']
+__all__ = ['ImageMembersClient', 'ImagesClient', 'ImageCacheClient',
+           'NamespaceObjectsClient', 'NamespacePropertiesClient',
+           'NamespaceTagsClient', 'NamespacesClient', 'ResourceTypesClient',
+           'SchemasClient', 'VersionsClient']
diff --git a/tempest/lib/services/image/v2/image_cache_client.py b/tempest/lib/services/image/v2/image_cache_client.py
new file mode 100644
index 0000000..90ff776
--- /dev/null
+++ b/tempest/lib/services/image/v2/image_cache_client.py
@@ -0,0 +1,74 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.common import rest_client
+
+
+class ImageCacheClient(rest_client.RestClient):
+    api_version = "v2"
+
+    def list_cache(self):
+        """Lists all images in cache or queue. (Since Image API v2.14)
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/image/v2/?expanded=query-cache-status-detail#cache-manage
+        """
+        url = 'cache'
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def cache_queue(self, image_id):
+        """Queues image for caching. (Since Image API v2.14)
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/image/v2/?expanded=queue-image-detail#queue-image
+        """
+        url = 'cache/%s' % image_id
+        resp, body = self.put(url, body=None)
+        self.expected_success(202, resp.status)
+        return rest_client.ResponseBody(resp, body=body)
+
+    def cache_delete(self, image_id):
+        """Deletes an image from cache. (Since Image API v2.14)
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/image/v2/?expanded=delete-image-from-cache-detail#delete-image-from-cache
+        """
+        url = 'cache/%s' % image_id
+        resp, _ = self.delete(url)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp)
+
+    def cache_clear(self, target=None):
+        """Clears the cache and its queue. (Since Image API v2.14)
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/image/v2/?expanded=clear-images-from-cache-detail#clear-images-from-cache
+        """
+        url = 'cache'
+        headers = {}
+        if target:
+            headers['x-image-cache-clear-target'] = target
+        resp, _ = self.delete(url, headers=headers)
+        self.expected_success(204, resp.status)
+        return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/image/v2/images_client.py b/tempest/lib/services/image/v2/images_client.py
index abf427c..ae6ce25 100644
--- a/tempest/lib/services/image/v2/images_client.py
+++ b/tempest/lib/services/image/v2/images_client.py
@@ -206,7 +206,7 @@
 
     def image_import(self, image_id, method='glance-direct',
                      all_stores_must_succeed=None, all_stores=True,
-                     stores=None, image_uri=None):
+                     stores=None, import_params=None):
         """Import data from staging area to glance store.
 
         For a full list of available parameters, please refer to the official
@@ -222,9 +222,11 @@
                            all available stores (incompatible with stores)
         :param stores: A list of destination store names for the import. Must
                        be None if server does not support multistore.
-        :param image_uri: A URL to be used with the web-download method
+        :param import_params: A dict of import method parameters
         """
         url = 'images/%s/import' % image_id
+        if import_params is None:
+            import_params = {}
         data = {
             "method": {
                 "name": method
@@ -237,8 +239,8 @@
 
         if all_stores_must_succeed is not None:
             data['all_stores_must_succeed'] = all_stores_must_succeed
-        if image_uri:
-            data['method']['uri'] = image_uri
+        if import_params:
+            data['method'].update(import_params)
         data = json.dumps(data)
         headers = {'Content-Type': 'application/json'}
         resp, _ = self.post(url, data, headers=headers)
diff --git a/tempest/lib/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
index 52b2534..d7ce526 100644
--- a/tempest/lib/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 from urllib import parse as urllib
-from xml.etree import ElementTree as etree
 
+from defusedxml import ElementTree as etree
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index 6d07ec1..ee87726 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -14,9 +14,9 @@
 #    under the License.
 
 from urllib import parse as urllib
-from xml.etree import ElementTree as etree
 
 import debtcollector.moves
+from defusedxml import ElementTree as etree
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/volume/v3/backups_client.py b/tempest/lib/services/volume/v3/backups_client.py
index 4bf7ffb..0c32c52 100644
--- a/tempest/lib/services/volume/v3/backups_client.py
+++ b/tempest/lib/services/volume/v3/backups_client.py
@@ -18,6 +18,7 @@
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.api_schema.response.volume import backups as schema
+from tempest.lib.api_schema.response.volume.v3_64 import backups as schemav364
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
 from tempest.lib.services.volume import base_client
@@ -26,6 +27,11 @@
 class BackupsClient(base_client.BaseClient):
     """Volume V3 Backups client"""
 
+    schema_versions_info = [
+        {'min': None, 'max': '3.63', 'schema': schema},
+        {'min': '3.64', 'max': None, 'schema': schemav364}
+        ]
+
     def create_backup(self, **kwargs):
         """Creates a backup of volume.
 
@@ -76,6 +82,7 @@
         url = "backups/%s" % backup_id
         resp, body = self.get(url)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.show_backup, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -88,6 +95,7 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-backups-with-detail
         """
         url = "backups"
+        schema = self.get_schema(self.schema_versions_info)
         list_backups_schema = schema.list_backups_no_detail
         if detail:
             url += "/detail"
diff --git a/tempest/lib/services/volume/v3/transfers_client.py b/tempest/lib/services/volume/v3/transfers_client.py
index cc4e1b2..f85bf21 100644
--- a/tempest/lib/services/volume/v3/transfers_client.py
+++ b/tempest/lib/services/volume/v3/transfers_client.py
@@ -18,12 +18,23 @@
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.api_schema.response.volume import transfers as schema
+from tempest.lib.api_schema.response.volume.v3_55 \
+    import transfers as schemav355
+from tempest.lib.api_schema.response.volume.v3_57 \
+    import transfers as schemav357
 from tempest.lib.common import rest_client
+from tempest.lib.services.volume import base_client
 
 
-class TransfersClient(rest_client.RestClient):
+class TransfersClient(base_client.BaseClient):
     """Client class to send CRUD Volume Transfer API requests"""
 
+    schema_versions_info = [
+        {'min': None, 'max': '3.54', 'schema': schema},
+        {'min': '3.55', 'max': '3.56', 'schema': schemav355},
+        {'min': '3.57', 'max': None, 'schema': schemav357}
+    ]
+
     resource_path = 'os-volume-transfer'
 
     def create_volume_transfer(self, **kwargs):
@@ -36,6 +47,7 @@
         post_body = json.dumps({'transfer': kwargs})
         resp, body = self.post(self.resource_path, post_body)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.create_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -44,6 +56,7 @@
         url = "%s/%s" % (self.resource_path, transfer_id)
         resp, body = self.get(url)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.show_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -56,6 +69,7 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-volume-transfers-and-details
         """
         url = self.resource_path
+        schema = self.get_schema(self.schema_versions_info)
         schema_list_transfers = schema.list_volume_transfers_no_detail
         if detail:
             url += '/detail'
@@ -70,6 +84,7 @@
     def delete_volume_transfer(self, transfer_id):
         """Delete a volume transfer."""
         resp, body = self.delete("%s/%s" % (self.resource_path, transfer_id))
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.delete_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -84,6 +99,7 @@
         post_body = json.dumps({'accept': kwargs})
         resp, body = self.post(url, post_body)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.accept_volume_transfer, resp, body)
         return rest_client.ResponseBody(resp, body)
 
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index 9c6fe68..ad8bd71 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -17,6 +17,11 @@
 
 from oslo_serialization import jsonutils as json
 
+from tempest.lib.api_schema.response.volume.v3_61 import volumes as schemav361
+from tempest.lib.api_schema.response.volume.v3_63 import volumes as schemav363
+from tempest.lib.api_schema.response.volume.v3_64 import volumes as schemav364
+from tempest.lib.api_schema.response.volume.v3_65 import volumes as schemav365
+from tempest.lib.api_schema.response.volume.v3_69 import volumes as schemav369
 from tempest.lib.api_schema.response.volume import volumes as schema
 from tempest.lib.common import rest_client
 from tempest.lib import exceptions as lib_exc
@@ -26,6 +31,15 @@
 class VolumesClient(base_client.BaseClient):
     """Client class to send CRUD Volume V3 API requests"""
 
+    schema_versions_info = [
+        {'min': None, 'max': '3.60', 'schema': schema},
+        {'min': '3.61', 'max': '3.62', 'schema': schemav361},
+        {'min': '3.63', 'max': '3.63', 'schema': schemav363},
+        {'min': '3.64', 'max': '3.64', 'schema': schemav364},
+        {'min': '3.65', 'max': '3.68', 'schema': schemav365},
+        {'min': '3.69', 'max': None, 'schema': schemav369}
+        ]
+
     def _prepare_params(self, params):
         """Prepares params for use in get or _ext_get methods.
 
@@ -56,6 +70,7 @@
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-accessible-volumes
         """
         url = 'volumes'
+        schema = self.get_schema(self.schema_versions_info)
         list_schema = schema.list_volumes_no_detail
         if detail:
             list_schema = schema.list_volumes_with_detail
@@ -86,6 +101,7 @@
         url = "volumes/%s" % volume_id
         resp, body = self.get(url)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.show_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
@@ -99,6 +115,7 @@
         post_body = json.dumps({'volume': kwargs})
         resp, body = self.post('volumes', post_body)
         body = json.loads(body)
+        schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.create_volume, resp, body)
         return rest_client.ResponseBody(resp, body)
 
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index db28487..bf3f62f 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -145,6 +145,7 @@
             - 'binding:vnic_type' - defaults to CONF.network.port_vnic_type
             - 'binding:profile' - defaults to CONF.network.port_profile
         """
+
         if not client:
             client = self.ports_client
         name = data_utils.rand_name(
@@ -158,10 +159,12 @@
             network_id=network_id,
             **kwargs)
         self.assertIsNotNone(result, 'Unable to allocate port')
-        port = result['port']
+        port_id = result['port']['id']
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_port, port['id'])
-        return port
+                        client.delete_port, port_id)
+        port = waiters.wait_for_port_status(
+            client=client, port_id=port_id, status="DOWN")
+        return port["port"]
 
     def create_keypair(self, client=None, **kwargs):
         """Creates keypair
@@ -419,8 +422,12 @@
 
         body = self.backups_client.restore_backup(backup_id, **kwargs)
         restore = body['restore']
-        self.addCleanup(self.volumes_client.delete_volume,
-                        restore['volume_id'])
+
+        using_pre_existing_volume = kwargs.get('volume_id', False)
+        if not using_pre_existing_volume:
+            self.addCleanup(self.volumes_client.delete_volume,
+                            restore['volume_id'])
+
         waiters.wait_for_volume_resource_status(self.backups_client,
                                                 backup_id, 'available')
         waiters.wait_for_volume_resource_status(self.volumes_client,
@@ -473,7 +480,8 @@
 
         self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                         snapshot['id'])
-        self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.snapshots_client.delete_snapshot, snapshot['id'])
         waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                 snapshot['id'], 'available')
         snapshot = self.snapshots_client.show_snapshot(
@@ -810,7 +818,9 @@
             name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
         LOG.debug("Creating a snapshot image for server: %s", server['name'])
         image = _images_client.create_image(server['id'], name=name, **kwargs)
-        image_id = image.response['location'].split('images/')[1]
+        # microversion 2.45 and above returns image_id
+        image_id = image.get('image_id') or image.response['location'].split(
+            'images/')[1]
         waiters.wait_for_image_status(_image_client, image_id, 'active')
 
         self.addCleanup(_image_client.wait_for_resource_deletion,
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 6ee9f28..9788e19 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest.common import utils
 from tempest import config
 from tempest.lib import decorators
@@ -27,7 +29,7 @@
 
     This test is for verifying the functionality of encrypted cinder volumes.
 
-    For both LUKS and cryptsetup encryption types, this test performs
+    For both LUKS (v1 & v2) and cryptsetup encryption types, this test performs
     the following:
 
     * Boots an instance from an image (CONF.compute.image_ref)
@@ -55,11 +57,24 @@
     @decorators.attr(type='slow')
     @utils.services('compute', 'volume', 'image')
     def test_encrypted_cinder_volumes_luks(self):
+        """LUKS v1 decrypts volume through libvirt."""
         server = self.launch_instance()
         volume = self.create_encrypted_volume('luks',
                                               volume_type='luks')
         self.attach_detach_volume(server, volume)
 
+    @decorators.idempotent_id('7abec0a3-61a0-42a5-9e36-ad3138fb38b4')
+    @testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
+                      'Ceph only supports LUKSv2 if doing host attach.')
+    @decorators.attr(type='slow')
+    @utils.services('compute', 'volume', 'image')
+    def test_encrypted_cinder_volumes_luksv2(self):
+        """LUKS v2 decrypts volume through os-brick."""
+        server = self.launch_instance()
+        volume = self.create_encrypted_volume('luks2',
+                                              volume_type='luksv2')
+        self.attach_detach_volume(server, volume)
+
     @decorators.idempotent_id('cbc752ed-b716-4717-910f-956cce965722')
     @decorators.attr(type='slow')
     @utils.services('compute', 'volume', 'image')
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 5aac19c..90e1bc5 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import testtools
+
 from tempest.common import custom_matchers
 from tempest.common import utils
 from tempest.common import waiters
@@ -29,24 +31,11 @@
 
     """This is a basic minimum scenario test.
 
-    This test below:
+    These tests below:
     * across the multiple components
     * as a regular user
     * with and without optional parameters
     * check command outputs
-
-    Steps:
-    1. Create image
-    2. Create keypair
-    3. Boot instance with keypair and get list of instances
-    4. Create volume and show list of volumes
-    5. Attach volume to instance and getlist of volumes
-    6. Add IP to instance
-    7. Create and add security group to instance
-    8. Check SSH connection to instance
-    9. Reboot instance
-    10. Check SSH connection to instance after reboot
-
     """
 
     def nova_show(self, server):
@@ -67,8 +56,9 @@
             volume, custom_matchers.MatchesDictExceptForKeys(
                 got_volume, excluded_keys=excluded_keys))
 
-    def nova_reboot(self, server):
-        self.servers_client.reboot_server(server['id'], type='SOFT')
+    def nova_reboot(self, server, hard=False):
+        self.servers_client.reboot_server(server['id'],
+                                          type="HARD" if hard else "SOFT")
         waiters.wait_for_server_status(self.servers_client,
                                        server['id'], 'ACTIVE')
 
@@ -99,6 +89,20 @@
     @decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
     @utils.services('compute', 'volume', 'image', 'network')
     def test_minimum_basic_scenario(self):
+        """This is a basic minimum scenario with multiple components
+
+        Steps:
+        1. Create image
+        2. Create keypair
+        3. Boot instance with keypair and get list of instances
+        4. Create volume and show list of volumes
+        5. Attach volume to instance and get list of volumes
+        6. Add IP to instance
+        7. Create and add security group to instance
+        8. Check SSH connection to instance
+        9. Reboot instance
+        10. Check SSH connection to instance after reboot
+        """
         image = self.image_create()
         keypair = self.create_keypair()
 
@@ -121,7 +125,7 @@
         floating_ip = None
         server = self.servers_client.show_server(server['id'])['server']
         if (CONF.network_feature_enabled.floating_ips and
-            CONF.network.floating_network_name):
+                CONF.network.floating_network_name):
             fip = self.create_floating_ip(server)
             floating_ip = self.associate_floating_ip(
                 fip, server)
@@ -154,3 +158,91 @@
             waiters.wait_for_server_floating_ip(
                 self.servers_client, server, floating_ip,
                 wait_for_disassociate=True)
+
+    @decorators.idempotent_id('a8fd48ec-1d01-4895-b932-02321661ec1e')
+    @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
+                          "Cinder volume snapshots are disabled")
+    @utils.services('compute', 'volume', 'image', 'network')
+    def test_minimum_basic_instance_hard_reboot_after_vol_snap_deletion(self):
+        """Test compute hard reboot after volume snapshot deleted
+
+        Steps:
+        1. Create image
+        2. Create keypair
+        3. Boot instance with keypair and get list of instances
+        4. Create volume and show list of volumes
+        5. Attach volume to instance and get list of volumes
+        6. Create a snapshot from volume
+        7. Add IP to instance
+        8. Create and add security group to instance
+        9. Check SSH connection to instance
+        10. Write data timestamp to the attached volume
+        11. Delete volume snapshot before reboot instance
+        12. Reboot instance (HARD)
+        13. Check SSH connection to instance after reboot
+        14. Verify attached disk data timestamp post instance reboot
+        """
+        image = self.image_create()
+        keypair = self.create_keypair()
+
+        server = self.create_server(image_id=image, key_name=keypair['name'])
+        servers = self.servers_client.list_servers()['servers']
+        self.assertIn(server['id'], [x['id'] for x in servers])
+
+        self.nova_show(server)
+
+        volume = self.create_volume()
+        volumes = self.volumes_client.list_volumes()['volumes']
+        self.assertIn(volume['id'], [x['id'] for x in volumes])
+
+        self.cinder_show(volume)
+
+        volume = self.nova_volume_attach(server, volume)
+        self.addCleanup(self.nova_volume_detach, server, volume)
+        snapshot = self.create_volume_snapshot(volume['id'], force=True)
+        self.cinder_show(volume)
+
+        floating_ip = None
+        server = self.servers_client.show_server(server['id'])['server']
+        if (CONF.network_feature_enabled.floating_ips and
+                CONF.network.floating_network_name):
+            fip = self.create_floating_ip(server)
+            floating_ip = self.associate_floating_ip(
+                fip, server)
+            waiters.wait_for_server_floating_ip(self.servers_client, server,
+                                                floating_ip)
+            ssh_ip = floating_ip['floating_ip_address']
+        else:
+            ssh_ip = self.get_server_ip(server)
+
+        self.create_and_add_security_group_to_server(server)
+
+        # check that we can SSH to the server before reboot
+        self.linux_client = self.get_remote_client(
+            ssh_ip, private_key=keypair['private_key'],
+            server=server)
+
+        # write data to the volume before reboot instance
+        timestamp_before = self.create_timestamp(
+            ssh_ip, private_key=keypair['private_key'], server=server)
+        # delete the snapshot before rebooting the instance
+        self.snapshots_client.delete_snapshot(snapshot['id'])
+        self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
+        self.nova_reboot(server, hard=True)
+
+        # check that we can SSH to the server after reboot
+        # (both connections are part of the scenario)
+        self.linux_client = self.get_remote_client(
+            ssh_ip, private_key=keypair['private_key'],
+            server=server)
+
+        self.check_disks()
+        timestamp_after = self.get_timestamp(
+            ssh_ip, private_key=keypair['private_key'], server=server)
+        self.assertEqual(timestamp_before, timestamp_after)
+        if floating_ip:
+            # delete the floating IP, this should refresh the server addresses
+            self.disassociate_floating_ip(floating_ip)
+            waiters.wait_for_server_floating_ip(
+                self.servers_client, server, floating_ip,
+                wait_for_disassociate=True)
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index b48ac3c..e630e29 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -15,7 +15,9 @@
 
 import testtools
 
+from oslo_log import log
 from tempest.common import utils
+from tempest.common.utils import net_downtime
 from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
@@ -23,6 +25,8 @@
 
 CONF = config.CONF
 
+LOG = log.getLogger(__name__)
+
 
 class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
     """Check VM connectivity after some advanced instance operations executed:
@@ -252,6 +256,11 @@
         block_migration = (CONF.compute_feature_enabled.
                            block_migration_for_live_migration)
         old_host = self.get_host_for_server(server['id'])
+
+        downtime_meter = net_downtime.NetDowntimeMeter(
+            floating_ip['floating_ip_address'])
+        self.useFixture(downtime_meter)
+
         self.admin_servers_client.live_migrate_server(
             server['id'], host=None, block_migration=block_migration,
             disk_over_commit=False)
@@ -261,10 +270,19 @@
         new_host = self.get_host_for_server(server['id'])
         self.assertNotEqual(old_host, new_host, 'Server did not migrate')
 
+        downtime = downtime_meter.get_downtime()
+        self.assertIsNotNone(downtime)
+        LOG.debug("Downtime seconds measured with downtime_meter = %r",
+                  downtime)
+        allowed_downtime = CONF.validation.allowed_network_downtime
+        self.assertLess(
+            downtime, allowed_downtime,
+            "Downtime of {} seconds is higher than expected '{}'".format(
+                downtime, allowed_downtime))
+
         self._wait_server_status_and_check_network_connectivity(
             server, keypair, floating_ip)
 
-    @decorators.unstable_test(bug='1836595')
     @decorators.idempotent_id('25b188d7-0183-4b1e-a11d-15840c8e2fd6')
     @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
                           'Cold migration is not available.')
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 5a5cc27..2e87c15 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -246,14 +246,10 @@
         # Assert that the underlying volume is gone.
         self.volumes_client.wait_for_resource_deletion(volume_origin['id'])
 
-    @decorators.idempotent_id('cb78919a-e553-4bab-b73b-10cf4d2eb125')
-    @testtools.skipUnless(CONF.compute_feature_enabled.attach_encrypted_volume,
-                          'Encrypted volume attach is not supported')
-    @utils.services('compute', 'volume')
-    def test_boot_server_from_encrypted_volume_luks(self):
+    def _do_test_boot_server_from_encrypted_volume_luks(self, provider):
         # Create an encrypted volume
-        volume = self.create_encrypted_volume('luks',
-                                              volume_type='luks')
+        volume = self.create_encrypted_volume(provider,
+                                              volume_type=provider)
 
         self.volumes_client.set_bootable_volume(volume['id'], bootable=True)
 
@@ -266,3 +262,21 @@
         server_info = self.servers_client.show_server(server['id'])['server']
         created_volume = server_info['os-extended-volumes:volumes_attached']
         self.assertEqual(volume['id'], created_volume[0]['id'])
+
+    @decorators.idempotent_id('cb78919a-e553-4bab-b73b-10cf4d2eb125')
+    @testtools.skipUnless(CONF.compute_feature_enabled.attach_encrypted_volume,
+                          'Encrypted volume attach is not supported')
+    @utils.services('compute', 'volume')
+    def test_boot_server_from_encrypted_volume_luks(self):
+        """LUKS v1 decrypts volume through libvirt."""
+        self._do_test_boot_server_from_encrypted_volume_luks('luks')
+
+    @decorators.idempotent_id('5ab6100f-1b31-4dd0-a774-68cfd837ef77')
+    @testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
+                      'Ceph only supports LUKSv2 if doing host attach.')
+    @testtools.skipUnless(CONF.compute_feature_enabled.attach_encrypted_volume,
+                          'Encrypted volume attach is not supported')
+    @utils.services('compute', 'volume')
+    def test_boot_server_from_encrypted_volume_luksv2(self):
+        """LUKs v2 decrypts volume through os-brick."""
+        self._do_test_boot_server_from_encrypted_volume_luks('luks2')
diff --git a/tempest/services/orchestration/__init__.py b/tempest/services/orchestration/__init__.py
deleted file mode 100644
index 5a1ffcc..0000000
--- a/tempest/services/orchestration/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2016 Hewlett-Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-from tempest.services.orchestration.json.orchestration_client import \
-    OrchestrationClient
-
-__all__ = ['OrchestrationClient']
diff --git a/tempest/services/orchestration/json/__init__.py b/tempest/services/orchestration/json/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tempest/services/orchestration/json/__init__.py
+++ /dev/null
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
deleted file mode 100644
index 0d7720e..0000000
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ /dev/null
@@ -1,413 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-import re
-import time
-from urllib import parse as urllib
-
-from oslo_serialization import jsonutils as json
-
-from tempest import exceptions
-from tempest.lib.common import rest_client
-from tempest.lib import exceptions as lib_exc
-
-
-class OrchestrationClient(rest_client.RestClient):
-
-    def list_stacks(self, params=None):
-        """Lists all stacks for a user."""
-
-        uri = 'stacks'
-        if params:
-            uri += '?%s' % urllib.urlencode(params)
-
-        resp, body = self.get(uri)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def create_stack(self, name, disable_rollback=True, parameters=None,
-                     timeout_mins=60, template=None, template_url=None,
-                     environment=None, files=None):
-        if parameters is None:
-            parameters = {}
-        headers, body = self._prepare_update_create(
-            name,
-            disable_rollback,
-            parameters,
-            timeout_mins,
-            template,
-            template_url,
-            environment,
-            files)
-        uri = 'stacks'
-        resp, body = self.post(uri, headers=headers, body=body)
-        self.expected_success(201, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_stack(self, stack_identifier, name, disable_rollback=True,
-                     parameters=None, timeout_mins=60, template=None,
-                     template_url=None, environment=None, files=None):
-        if parameters is None:
-            parameters = {}
-        headers, body = self._prepare_update_create(
-            name,
-            disable_rollback,
-            parameters,
-            timeout_mins,
-            template,
-            template_url,
-            environment)
-
-        uri = "stacks/%s" % stack_identifier
-        resp, body = self.put(uri, headers=headers, body=body)
-        self.expected_success(202, resp.status)
-        return rest_client.ResponseBody(resp, body)
-
-    def _prepare_update_create(self, name, disable_rollback=True,
-                               parameters=None, timeout_mins=60,
-                               template=None, template_url=None,
-                               environment=None, files=None):
-        if parameters is None:
-            parameters = {}
-        post_body = {
-            "stack_name": name,
-            "disable_rollback": disable_rollback,
-            "parameters": parameters,
-            "timeout_mins": timeout_mins,
-            "template": "HeatTemplateFormatVersion: '2012-12-12'\n",
-            "environment": environment,
-            "files": files
-        }
-        if template:
-            post_body['template'] = template
-        if template_url:
-            post_body['template_url'] = template_url
-        body = json.dumps(post_body)
-
-        # Password must be provided on stack create so that heat
-        # can perform future operations on behalf of the user
-        headers = self.get_headers()
-        headers['X-Auth-Key'] = self.password
-        headers['X-Auth-User'] = self.user
-        return headers, body
-
-    def show_stack(self, stack_identifier):
-        """Returns the details of a single stack."""
-        url = "stacks/%s" % stack_identifier
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def suspend_stack(self, stack_identifier):
-        """Suspend a stack."""
-        url = 'stacks/%s/actions' % stack_identifier
-        body = {'suspend': None}
-        resp, body = self.post(url, json.dumps(body))
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def resume_stack(self, stack_identifier):
-        """Resume a stack."""
-        url = 'stacks/%s/actions' % stack_identifier
-        body = {'resume': None}
-        resp, body = self.post(url, json.dumps(body))
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def list_resources(self, stack_identifier):
-        """Returns the details of a single resource."""
-        url = "stacks/%s/resources" % stack_identifier
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_resource(self, stack_identifier, resource_name):
-        """Returns the details of a single resource."""
-        url = "stacks/%s/resources/%s" % (stack_identifier, resource_name)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_stack(self, stack_identifier):
-        """Deletes the specified Stack."""
-        resp, _ = self.delete("stacks/%s" % str(stack_identifier))
-        self.expected_success(204, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def wait_for_stack_status(self, stack_identifier, status,
-                              failure_pattern='^.*_FAILED$'):
-        """Waits for a Stack to reach a given status."""
-        start = int(time.time())
-        fail_regexp = re.compile(failure_pattern)
-
-        while True:
-            try:
-                body = self.show_stack(stack_identifier)['stack']
-            except lib_exc.NotFound:
-                if status == 'DELETE_COMPLETE':
-                    return
-            stack_name = body['stack_name']
-            stack_status = body['stack_status']
-            if stack_status == status:
-                return body
-            if fail_regexp.search(stack_status):
-                raise exceptions.StackBuildErrorException(
-                    stack_identifier=stack_identifier,
-                    stack_status=stack_status,
-                    stack_status_reason=body['stack_status_reason'])
-
-            if int(time.time()) - start >= self.build_timeout:
-                message = ('Stack %s failed to reach %s status (current: %s) '
-                           'within the required time (%s s).' %
-                           (stack_name, status, stack_status,
-                            self.build_timeout))
-                raise lib_exc.TimeoutException(message)
-            time.sleep(self.build_interval)
-
-    def show_resource_metadata(self, stack_identifier, resource_name):
-        """Returns the resource's metadata."""
-        url = ('stacks/{stack_identifier}/resources/{resource_name}'
-               '/metadata'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_events(self, stack_identifier):
-        """Returns list of all events for a stack."""
-        url = 'stacks/{stack_identifier}/events'.format(**locals())
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_resource_events(self, stack_identifier, resource_name):
-        """Returns list of all events for a resource from stack."""
-        url = ('stacks/{stack_identifier}/resources/{resource_name}'
-               '/events'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_event(self, stack_identifier, resource_name, event_id):
-        """Returns the details of a single stack's event."""
-        url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
-               '/{event_id}'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_template(self, stack_identifier):
-        """Returns the template for the stack."""
-        url = ('stacks/{stack_identifier}/template'.format(**locals()))
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def _validate_template(self, post_body):
-        """Returns the validation request result."""
-        post_body = json.dumps(post_body)
-        resp, body = self.post('validate', post_body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def validate_template(self, template, parameters=None):
-        """Returns the validation result for a template with parameters."""
-        if parameters is None:
-            parameters = {}
-        post_body = {
-            'template': template,
-            'parameters': parameters,
-        }
-        return self._validate_template(post_body)
-
-    def validate_template_url(self, template_url, parameters=None):
-        """Returns the validation result for a template with parameters."""
-        if parameters is None:
-            parameters = {}
-        post_body = {
-            'template_url': template_url,
-            'parameters': parameters,
-        }
-        return self._validate_template(post_body)
-
-    def list_resource_types(self):
-        """List resource types."""
-        resp, body = self.get('resource_types')
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_resource_type(self, resource_type_name):
-        """Return the schema of a resource type."""
-        url = 'resource_types/%s' % resource_type_name
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, json.loads(body))
-
-    def show_resource_type_template(self, resource_type_name):
-        """Return the template of a resource type."""
-        url = 'resource_types/%s/template' % resource_type_name
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        return rest_client.ResponseBody(resp, json.loads(body))
-
-    def create_software_config(self, name=None, config=None, group=None,
-                               inputs=None, outputs=None, options=None):
-        headers, body = self._prep_software_config_create(
-            name, config, group, inputs, outputs, options)
-
-        url = 'software_configs'
-        resp, body = self.post(url, headers=headers, body=body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_software_config(self, conf_id):
-        """Returns a software configuration resource."""
-        url = 'software_configs/%s' % str(conf_id)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_software_config(self, conf_id):
-        """Deletes a specific software configuration."""
-        url = 'software_configs/%s' % str(conf_id)
-        resp, _ = self.delete(url)
-        self.expected_success(204, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def create_software_deploy(self, server_id=None, config_id=None,
-                               action=None, status=None,
-                               input_values=None, output_values=None,
-                               status_reason=None, signal_transport=None):
-        """Creates or updates a software deployment."""
-        headers, body = self._prep_software_deploy_update(
-            None, server_id, config_id, action, status, input_values,
-            output_values, status_reason, signal_transport)
-
-        url = 'software_deployments'
-        resp, body = self.post(url, headers=headers, body=body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def update_software_deploy(self, deploy_id=None, server_id=None,
-                               config_id=None, action=None, status=None,
-                               input_values=None, output_values=None,
-                               status_reason=None, signal_transport=None):
-        """Creates or updates a software deployment."""
-        headers, body = self._prep_software_deploy_update(
-            deploy_id, server_id, config_id, action, status, input_values,
-            output_values, status_reason, signal_transport)
-
-        url = 'software_deployments/%s' % str(deploy_id)
-        resp, body = self.put(url, headers=headers, body=body)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def list_software_deployments(self):
-        """Returns a list of all deployments."""
-        url = 'software_deployments'
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_software_deployment(self, deploy_id):
-        """Returns a specific software deployment."""
-        url = 'software_deployments/%s' % str(deploy_id)
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def show_software_deployment_metadata(self, server_id):
-        """Return a config metadata for a specific server."""
-        url = 'software_deployments/metadata/%s' % server_id
-        resp, body = self.get(url)
-        self.expected_success(200, resp.status)
-        body = json.loads(body)
-        return rest_client.ResponseBody(resp, body)
-
-    def delete_software_deploy(self, deploy_id):
-        """Deletes a specific software deployment."""
-        url = 'software_deployments/%s' % str(deploy_id)
-        resp, _ = self.delete(url)
-        self.expected_success(204, resp.status)
-        return rest_client.ResponseBody(resp)
-
-    def _prep_software_config_create(self, name=None, conf=None, group=None,
-                                     inputs=None, outputs=None, options=None):
-        """Prepares a software configuration body."""
-        post_body = {}
-        if name is not None:
-            post_body["name"] = name
-        if conf is not None:
-            post_body["config"] = conf
-        if group is not None:
-            post_body["group"] = group
-        if inputs is not None:
-            post_body["inputs"] = inputs
-        if outputs is not None:
-            post_body["outputs"] = outputs
-        if options is not None:
-            post_body["options"] = options
-        body = json.dumps(post_body)
-
-        headers = self.get_headers()
-        return headers, body
-
-    def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
-                                     config_id=None, action=None, status=None,
-                                     input_values=None, output_values=None,
-                                     status_reason=None,
-                                     signal_transport=None):
-        """Prepares a deployment create or update (if an id was given)."""
-        post_body = {}
-
-        if deploy_id is not None:
-            post_body["id"] = deploy_id
-        if server_id is not None:
-            post_body["server_id"] = server_id
-        if config_id is not None:
-            post_body["config_id"] = config_id
-        if action is not None:
-            post_body["action"] = action
-        if status is not None:
-            post_body["status"] = status
-        if input_values is not None:
-            post_body["input_values"] = input_values
-        if output_values is not None:
-            post_body["output_values"] = output_values
-        if status_reason is not None:
-            post_body["status_reason"] = status_reason
-        if signal_transport is not None:
-            post_body["signal_transport"] = signal_transport
-        body = json.dumps(post_body)
-
-        headers = self.get_headers()
-        return headers, body
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 1d0ee77..2695048 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -21,6 +21,7 @@
 from tempest import exceptions
 from tempest.lib import exceptions as lib_exc
 from tempest.lib.services.compute import servers_client
+from tempest.lib.services.network import ports_client
 from tempest.lib.services.volume.v2 import volumes_client
 from tempest.tests import base
 import tempest.tests.utils as utils
@@ -59,11 +60,19 @@
     def test_wait_for_image_imported_to_stores(self):
         self.client.show_image.return_value = ({'status': 'active',
                                                 'stores': 'fake_store'})
+        self.client.info_stores.return_value = {
+            'stores': [{'id': 'fake_store',
+                        'description': 'A writable store'},
+                       {'id': 'another_fake_store',
+                        'description': 'A read-only store',
+                        'read-only': 'true'}]
+        }
         start_time = int(time.time())
         waiters.wait_for_image_imported_to_stores(
-            self.client, 'fake_image_id', 'fake_store')
+            self.client, 'fake_image_id', 'fake_store,another_fake_store')
         end_time = int(time.time())
-        # Ensure waiter returns before build_timeout
+        # Ensure waiter returns before build_timeout, and did not wait
+        # for the read-only store
         self.assertLess((end_time - start_time), 10)
 
     def test_wait_for_image_imported_to_stores_failure(self):
@@ -95,6 +104,22 @@
                           waiters.wait_for_image_imported_to_stores,
                           client, 'fake_image_id', 'fake_store')
 
+    def test_wait_for_image_imported_to_stores_no_stores(self):
+        client = mock.MagicMock()
+        client.show_image.return_value = ({'status': 'active'})
+        client.info_stores.side_effect = lib_exc.NotFound
+        client.build_timeout = 2
+        start_time = time.time()
+        waiters.wait_for_image_imported_to_stores(
+            client, 'fake_image_id', None)
+        end_time = time.time()
+        self.assertLess(end_time - start_time, 10)
+
+        exc = self.assertRaises(lib_exc.TimeoutException,
+                                waiters.wait_for_image_imported_to_stores,
+                                client, 'fake_image_id', 'foo,bar')
+        self.assertIn('cowardly', str(exc))
+
     def test_wait_for_image_copied_to_stores(self):
         self.client.show_image.return_value = ({
             'status': 'active',
@@ -588,6 +613,48 @@
         )
 
 
+class TestPortCreationWaiter(base.TestCase):
+    def test_wait_for_port_status(self):
+        """Test that the waiter replies with the port before the timeout"""
+
+        def client_response(self):
+            """Mock client response, replies with the final status after
+            2 calls
+            """
+            if mock_client.call_count >= 2:
+                return mock_port
+            else:
+                mock_client.call_count += 1
+                return mock_port_build
+
+        mock_port = {'port': {'id': '1234', 'status': "DOWN"}}
+        mock_port_build = {'port': {'id': '1234', 'status': "BUILD"}}
+        mock_client = mock.Mock(
+            spec=ports_client.PortsClient,
+            build_timeout=30, build_interval=1,
+            show_port=client_response)
+        fake_port_id = "1234"
+        fake_status = "DOWN"
+        self.assertEqual(mock_port, waiters.wait_for_port_status(
+            mock_client, fake_port_id, fake_status))
+
+    def test_wait_for_port_status_timeout(self):
+        """Negative test - checking that a timeout
+        presented by a small 'fake_timeout' and a static status of
+        'BUILD' in the mock will raise a timeout exception
+        """
+        mock_port = {'port': {'id': '1234', 'status': "BUILD"}}
+        mock_client = mock.Mock(
+            spec=ports_client.PortsClient,
+            build_timeout=2, build_interval=1,
+            show_port=lambda id: mock_port)
+        fake_port_id = "1234"
+        fake_status = "ACTIVE"
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_port_status, mock_client,
+                          fake_port_id, fake_status)
+
+
 class TestServerFloatingIPWaiters(base.TestCase):
 
     def test_wait_for_server_floating_ip_associate_timeout(self):
diff --git a/tempest/tests/lib/common/test_rest_client.py b/tempest/tests/lib/common/test_rest_client.py
index 1dea5f5..910756f 100644
--- a/tempest/tests/lib/common/test_rest_client.py
+++ b/tempest/tests/lib/common/test_rest_client.py
@@ -280,6 +280,26 @@
         body = self.rest_client._parse_resp(json.dumps(empty_list))
         self.assertEqual(empty_list, body)
 
+    def test_parse_top_key_match(self):
+        body = self.rest_client._parse_resp(json.dumps(self.dict_expected),
+                                            top_key_to_verify="body_dict")
+        self.assertEqual(self.dict_expected["body_dict"], body)
+
+
+class TestRestClientParseErrorRespJSON(BaseRestClientTestClass):
+
+    dict_expected = {"body_dict": {"fake_key": "fake_value"}}
+
+    def setUp(self):
+        self.fake_http = fake_http.fake_httplib2()
+        super(TestRestClientParseErrorRespJSON, self).setUp()
+
+    def test_parse_top_key_no_match(self):
+        self.assertRaises(AssertionError,
+                          self.rest_client._parse_resp,
+                          json.dumps(self.dict_expected),
+                          top_key_to_verify="body_key")
+
 
 class TestRestClientErrorCheckerJSON(base.TestCase):
     c_type = "application/json"
diff --git a/tempest/tests/lib/services/compute/test_server_external_events_client.py b/tempest/tests/lib/services/compute/test_server_external_events_client.py
new file mode 100644
index 0000000..63922b3
--- /dev/null
+++ b/tempest/tests/lib/services/compute/test_server_external_events_client.py
@@ -0,0 +1,56 @@
+# Copyright 2022 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.compute import server_external_events_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestServerExternalEventsClient(base.BaseServiceTest):
+
+    events = [
+        {
+            "code": 200,
+            "name": "network-changed",
+            "server_uuid": "ff1df7b2-6772-45fd-9326-c0a3b05591c2",
+            "status": "completed",
+            "tag": "foo"
+        }
+    ]
+
+    events_req = [
+        {
+            "name": "network-changed",
+            "server_uuid": "ff1df7b2-6772-45fd-9326-c0a3b05591c2",
+        }
+    ]
+
+    def setUp(self):
+        super(TestServerExternalEventsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = server_external_events_client.ServerExternalEventsClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_create_server_external_events(self, bytes_body=False):
+        expected = {"events": self.events}
+        self.check_service_client_function(
+            self.client.create_server_external_events,
+            'tempest.lib.common.rest_client.RestClient.post', expected,
+            bytes_body, events=self.events_req)
+
+    def test_create_server_external_events_str_body(self):
+        self._test_create_server_external_events(bytes_body=False)
+
+    def test_create_server_external_events_byte_body(self):
+        self._test_create_server_external_events(bytes_body=True)
diff --git a/tempest/tests/lib/services/image/v2/test_image_cache_client.py b/tempest/tests/lib/services/image/v2/test_image_cache_client.py
new file mode 100644
index 0000000..1a99115
--- /dev/null
+++ b/tempest/tests/lib/services/image/v2/test_image_cache_client.py
@@ -0,0 +1,64 @@
+# Copyright 2022 Red Hat, Inc.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.image.v2 import image_cache_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestImageCacheClient(base.BaseServiceTest):
+    def setUp(self):
+        super(TestImageCacheClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = image_cache_client.ImageCacheClient(
+            fake_auth, 'image', 'regionOne')
+
+    def test_list_cache(self):
+        fake_result = {
+            "cached_images": [{
+                "image_id": "8f332e84-ea60-4501-8e11-5efcddb81f30",
+                "hits": 3,
+                "last_accessed": 1639578364.65118,
+                "last_modified": 1639389612.596718,
+                "size": 16300544
+            }],
+            "queued_images": ['1bea47ed-f6a9-463b-b423-14b9cca9ad27']}
+        self.check_service_client_function(
+            self.client.list_cache,
+            'tempest.lib.common.rest_client.RestClient.get',
+            fake_result,
+            mock_args=['cache'])
+
+    def test_cache_queue(self):
+        self.check_service_client_function(
+            self.client.cache_queue,
+            'tempest.lib.common.rest_client.RestClient.put',
+            {},
+            status=202,
+            image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8")
+
+    def test_cache_delete(self):
+        fake_result = {}
+        self.check_service_client_function(
+            self.client.cache_delete,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            fake_result, image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8",
+            status=204)
+
+    def test_cache_clear_without_target(self):
+        fake_result = {}
+        self.check_service_client_function(
+            self.client.cache_clear,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            fake_result, status=204)
diff --git a/tools/tempest-integrated-gate-networking-exclude-list.txt b/tools/tempest-integrated-gate-networking-exclude-list.txt
index 263b2e4..9d79a35 100644
--- a/tools/tempest-integrated-gate-networking-exclude-list.txt
+++ b/tools/tempest-integrated-gate-networking-exclude-list.txt
@@ -11,9 +11,11 @@
 
 # Skip Cinder, Glance and Swift only scenario tests.
 tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks
+tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks2
 tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup
 tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_basic_ops
 tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks2
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_image_defined_boot_from_volume
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_create_server_from_volume_snapshot
diff --git a/tools/tempest-integrated-gate-placement-exclude-list.txt b/tools/tempest-integrated-gate-placement-exclude-list.txt
index efba796..eb68b32 100644
--- a/tools/tempest-integrated-gate-placement-exclude-list.txt
+++ b/tools/tempest-integrated-gate-placement-exclude-list.txt
@@ -11,9 +11,11 @@
 
 # Skip Cinder, Glance and Swift only scenario tests.
 tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks
+tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_luks2
 tempest.scenario.test_encrypted_cinder_volumes.TestEncryptedCinderVolumes.test_encrypted_cinder_volumes_cryptsetup
 tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_basic_ops
 tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks
+tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks2
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_image_defined_boot_from_volume
 tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_create_server_from_volume_snapshot
diff --git a/tox.ini b/tox.ini
index b07fdaf..c784293 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,6 @@
 [tox]
-envlist = pep8,py36,py39,bashate,pip-check-reqs
+envlist = pep8,py39,bashate,pip-check-reqs
 minversion = 3.18.0
-skipsdist = True
 ignore_basepython_conflict = True
 
 [tempestenv]
@@ -24,10 +23,25 @@
     OS_STDERR_CAPTURE=1
     OS_TEST_TIMEOUT=160
     PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site
-passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
+passenv =
+    OS_STDOUT_CAPTURE
+    OS_STDERR_CAPTURE
+    OS_TEST_TIMEOUT
+    OS_TEST_LOCK_PATH
+    TEMPEST_CONFIG
+    TEMPEST_CONFIG_DIR
+    http_proxy
+    HTTP_PROXY
+    https_proxy
+    HTTPS_PROXY
+    no_proxy
+    NO_PROXY
+    ZUUL_CACHE_DIR
+    REQUIREMENTS_PIP_LOCATION
+    GENERATE_TEMPEST_PLUGIN_LIST
 usedevelop = True
-install_command = pip install {opts} {packages}
-allowlist_externals = *
+allowlist_externals =
+    find
 deps =
     -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/requirements.txt
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 3dd8c49..5adf89e 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -86,6 +86,15 @@
         # Enbale horizon so that we can run horizon test.
         horizon: true
 
+# TODO(gmann): As per the 2023.1 testing runtime, we need to run at least
+# one job on Focal. This job can be removed as per the future testing
+# runtime (whenever we drop the Ubuntu Focal testing).
+- job:
+    name: tempest-full-ubuntu-focal
+    description: This is tempest-full python3 job on Ubuntu Focal (20.04)
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-focal
+
 - job:
     name: tempest-full-centos-9-stream
     parent: tempest-full-py3
@@ -225,22 +234,11 @@
         TEMPEST_PLACEMENT_MIN_MICROVERSION: 'latest'
 
 - job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-focal
-    # This job runs on Focal from stable/victoria on.
-    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: False
-
-- job:
     name: tempest-multinode-full-py3
-    parent: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-jammy
+    # This job runs on ubuntu Jammy and after stable/zed.
+    branches: ^(?!stable/(pike|queens|rocky|stein|train|ussuri|victoria|wallaby|xena|yoga|zed)).*$
     vars:
       devstack_localrc:
         USE_PYTHON3: true
@@ -319,11 +317,20 @@
     vars:
       tox_envlist: full
       configure_swap_size: 4096
-      devstack_local_conf:
-        test-config:
-          "$TEMPEST_CONFIG":
-            validation:
-              ssh_key_type: 'ecdsa'
+      nslookup_target: 'opendev.org'
+
+- job:
+    name: tempest-centos9-stream-fips
+    parent: devstack-tempest
+    description: |
+      Integration testing for a FIPS enabled Centos 9 system
+    timeout: 10800
+    nodeset: devstack-single-node-centos-9-stream
+    pre-run: playbooks/enable-fips.yaml
+    vars:
+      tox_envlist: full
+      configure_swap_size: 4096
+      nslookup_target: 'opendev.org'
 
 - job:
     name: tempest-pg-full
@@ -338,6 +345,30 @@
         # ENABLE_FILE_INJECTION: true
         DATABASE_TYPE: postgresql
 
+- job:
+    name: tempest-full-enforce-scope-new-defaults
+    parent: tempest-full-py3
+    description: |
+      This job runs the Tempest tests with scope and new defaults enabled.
+    # TODO: remove this once https://review.opendev.org/c/openstack/neutron-lib/+/864213
+    # fix is released in neutron-lib
+    required-projects:
+      - openstack/neutron-lib
+      - openstack/neutron
+    vars:
+      devstack_localrc:
+        # Enabling the scope and new defaults for services.
+        # NOTE: (gmann) We need to keep the keystone scope check disabled as
+        # services (except ironic) do not support the system scope and
+        # they need keystone to continue working with project scope. Until
+        # Keystone policies are changed to work for both system as well as
+        # project scope, we need to keep the scope check disabled for
+        # keystone.
+        NOVA_ENFORCE_SCOPE: true
+        CINDER_ENFORCE_SCOPE: true
+        GLANCE_ENFORCE_SCOPE: true
+        NEUTRON_ENFORCE_SCOPE: true
+
 - project-template:
     name: integrated-gate-networking
     description: |
@@ -350,12 +381,18 @@
         - grenade-skip-level:
             voting: false
         - tempest-integrated-networking
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-networking
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-compute
@@ -377,15 +414,20 @@
         # centos-8-stream is tested from wallaby -> yoga branches
         - tempest-integrated-compute-centos-8-stream:
             branches: ^stable/(wallaby|xena|yoga).*$
-        # centos-9-stream is tested from zed release onwards
-        - tempest-integrated-compute-centos-9-stream:
-            branches: ^(?!stable/(pike|queens|rocky|stein|train|ussuri|victoria|wallaby|xena|yoga)).*$
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - tempest-integrated-compute
-        - tempest-integrated-compute-centos-9-stream
-        - openstacksdk-functional-devstack
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
+    periodic-weekly:
+      jobs:
+        # centos-9-stream is tested from zed release onwards
+        - tempest-integrated-compute-centos-9-stream:
+            branches: ^(?!stable/(pike|queens|rocky|stein|train|ussuri|victoria|wallaby|xena|yoga)).*$
 
 - project-template:
     name: integrated-gate-placement
@@ -400,12 +442,18 @@
         - grenade-skip-level:
             voting: false
         - tempest-integrated-placement
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-placement
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-storage
@@ -420,12 +468,18 @@
         - grenade-skip-level:
             voting: false
         - tempest-integrated-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
 
 - project-template:
     name: integrated-gate-object-storage
@@ -438,9 +492,15 @@
       jobs:
         - grenade
         - tempest-integrated-object-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-object-storage
-        - openstacksdk-functional-devstack
+        # Do not run it on ussuri until below issue is fixed
+        # https://storyboard.openstack.org/#!/story/2010057
+        - openstacksdk-functional-devstack:
+            branches: ^(?!stable/ussuri).*$
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 6ab7eed..966cc9a 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -8,10 +8,9 @@
     check:
       jobs:
         - openstack-tox-pep8
-        - openstack-tox-py36
-        - openstack-tox-py37
         - openstack-tox-py38
         - openstack-tox-py39
+        - openstack-tox-py310
         - tempest-full-parallel:
             # Define list of irrelevant files to use everywhere else
             irrelevant-files: &tempest-irrelevant-files
@@ -29,22 +28,20 @@
               - ^.mailmap$
         - tempest-full-py3:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-ubuntu-focal:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-py3-ipv6:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
         - glance-multistore-cinder-import:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-zed:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-yoga:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-xena:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-wallaby-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-victoria-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-slow-wallaby:
-            irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-tox-plugin-sanity-check:
@@ -101,9 +98,13 @@
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
         - nova-live-migration:
-            voting: false
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-enforce-scope-new-defaults:
             irrelevant-files: *tempest-irrelevant-files
         - devstack-plugin-ceph-tempest-py3:
+            # TODO(kopecmartin): make it voting once the below bug is fixed
+            # https://bugs.launchpad.net/devstack-plugin-ceph/+bug/1975648
+            voting: false
             irrelevant-files: *tempest-irrelevant-files
         - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
@@ -123,16 +124,20 @@
         - openstack-tox-bashate:
             irrelevant-files: *tempest-irrelevant-files-2
         - tempest-full-centos-9-stream:
+            # TODO(gmann): make it voting once below fix is merged
+            # https://review.opendev.org/c/openstack/tempest/+/842140
+            voting: false
             irrelevant-files: *tempest-irrelevant-files
     gate:
       jobs:
         - openstack-tox-pep8
-        - openstack-tox-py36
-        - openstack-tox-py37
         - openstack-tox-py38
         - openstack-tox-py39
+        - openstack-tox-py310
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-ubuntu-focal:
+            irrelevant-files: *tempest-irrelevant-files
         - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-py3:
@@ -141,12 +146,19 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-ipv6-only:
             irrelevant-files: *tempest-irrelevant-files-3
-        - devstack-plugin-ceph-tempest-py3:
+        - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-centos-9-stream:
+        - tempest-full-enforce-scope-new-defaults:
+            irrelevant-files: *tempest-irrelevant-files
+        #- devstack-plugin-ceph-tempest-py3:
+        #    irrelevant-files: *tempest-irrelevant-files
+        #- tempest-full-centos-9-stream:
+        #    irrelevant-files: *tempest-irrelevant-files
+        - nova-live-migration:
             irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
+        - nova-multi-cell
         - tempest-with-latest-microversion
         - tempest-stestr-master
         - tempest-cinder-v2-api:
@@ -161,19 +173,19 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-pg-full:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-centos8-stream-fips:
+        - tempest-centos9-stream-fips:
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-zed
         - tempest-full-yoga
         - tempest-full-xena
-        - tempest-full-wallaby-py3
-        - tempest-full-victoria-py3
+        - tempest-slow-zed
         - tempest-slow-yoga
         - tempest-slow-xena
-        - tempest-slow-wallaby
     periodic:
       jobs:
         - tempest-all
         - tempest-full-oslo-master
         - tempest-stestr-master
+        - tempest-centos9-stream-fips
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 00b40f5..fb2300b 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,40 +1,41 @@
 # NOTE(gmann): This file includes all stable release jobs definition.
 - job:
+    name: tempest-full-zed
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/zed
+
+- job:
     name: tempest-full-yoga
     parent: tempest-full-py3
+    nodeset: openstack-single-node-focal
     override-checkout: stable/yoga
 
 - job:
     name: tempest-full-xena
     parent: tempest-full-py3
+    nodeset: openstack-single-node-focal
     override-checkout: stable/xena
 
 - job:
-    name: tempest-full-wallaby-py3
-    parent: tempest-full-py3
-    override-checkout: stable/wallaby
-
-- job:
-    name: tempest-full-victoria-py3
-    parent: tempest-full-py3
-    override-checkout: stable/victoria
+    name: tempest-slow-zed
+    parent: tempest-slow-py3
+    nodeset: openstack-two-node-focal
+    override-checkout: stable/zed
 
 - job:
     name: tempest-slow-yoga
     parent: tempest-slow-py3
+    nodeset: openstack-two-node-focal
     override-checkout: stable/yoga
 
 - job:
     name: tempest-slow-xena
     parent: tempest-slow-py3
+    nodeset: openstack-two-node-focal
     override-checkout: stable/xena
 
 - job:
-    name: tempest-slow-wallaby
-    parent: tempest-slow-py3
-    override-checkout: stable/wallaby
-
-- job:
     name: tempest-full-py3
     parent: devstack-tempest
     # This job version is with swift disabled on py3
@@ -90,6 +91,69 @@
         neutron-qos: true
 
 - job:
+    name: tempest-multinode-full-py3
+    parent: tempest-multinode-full
+    nodeset: openstack-two-node-bionic
+    # This job runs on Bionic.
+    branches:
+      - stable/stein
+      - stable/train
+      - stable/ussuri
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-trunk: true
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
+
+- job:
+    name: tempest-multinode-full-py3
+    parent: tempest-multinode-full
+    nodeset: openstack-two-node-focal
+    # This job runs on Focal and is supposed to run until stable/zed.
+    branches:
+      - stable/victoria
+      - stable/wallaby
+      - stable/xena
+      - stable/yoga
+      - stable/zed
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-trunk: true
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-focal
+    # This job runs on Focal with Python 2. This is for stable/victoria to stable/zed.
+    branches:
+      - stable/victoria
+      - stable/wallaby
+      - stable/xena
+      - stable/yoga
+      - stable/zed
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
     name: tempest-multinode-full
     parent: tempest-multinode-full-base
     nodeset: openstack-two-node-bionic
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 822feaa..ca9ba7f 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -73,7 +73,7 @@
     parent: tox
     description: |
       Run tempest plugin sanity check script using tox.
-    nodeset: ubuntu-focal
+    nodeset: ubuntu-jammy
     vars:
       tox_envlist: plugin-sanity-check
     timeout: 5000