Merge "Use LOG.warning instead of deprecated LOG.warn"
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 0b80b72..ecf2930 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -350,6 +350,10 @@
.. _2.32: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id29
+ * `2.33`_
+
+ .. _2.33: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id30
+
* `2.36`_
.. _2.36: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#microversion
@@ -428,7 +432,11 @@
* `2.79`_
- .. _2.79: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-train
+ .. _2.79: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-train
+
+ * `2.86`_
+
+ .. _2.86: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id79
* Volume
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
new file mode 100644
index 0000000..c8f042d
--- /dev/null
+++ b/playbooks/enable-fips.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+ tasks:
+ - include_role:
+ name: enable-fips
diff --git a/releasenotes/notes/add-ssh-key-type-38d7a2f900d79842.yaml b/releasenotes/notes/add-ssh-key-type-38d7a2f900d79842.yaml
new file mode 100644
index 0000000..fef3004
--- /dev/null
+++ b/releasenotes/notes/add-ssh-key-type-38d7a2f900d79842.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+    Add a parameter to specify the SSH key type. Current options are 'rsa'
+    (the default) and 'ecdsa'. Tempest now supports importing and
+    generating both 'rsa' and 'ecdsa' SSH key types.
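
As a hedged illustration only (not Tempest's actual implementation; the helper
name is made up for this note), SSH key material of either type can be built
with the ``cryptography`` package that this change adds to requirements.txt::

    # Illustrative sketch: build SSH key material of the requested type.
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import ec, rsa


    def generate_ssh_key(ssh_key_type='rsa'):
        # Pick the key algorithm based on the requested type.
        if ssh_key_type == 'rsa':
            key = rsa.generate_private_key(public_exponent=65537,
                                           key_size=2048,
                                           backend=default_backend())
        elif ssh_key_type == 'ecdsa':
            key = ec.generate_private_key(ec.SECP384R1(), default_backend())
        else:
            raise ValueError('Unsupported ssh_key_type: %s' % ssh_key_type)
        # Serialize the private key as PEM and the public key in OpenSSH
        # format, as an SSH client/keypair API would expect.
        private_pem = key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption())
        public_openssh = key.public_key().public_bytes(
            encoding=serialization.Encoding.OpenSSH,
            format=serialization.PublicFormat.OpenSSH)
        return private_pem.decode(), public_openssh.decode()
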
diff --git a/releasenotes/notes/deprecate-old-api-microversion-fixture-a471aac985c0f3fb.yaml b/releasenotes/notes/deprecate-old-api-microversion-fixture-a471aac985c0f3fb.yaml
new file mode 100644
index 0000000..652f7fa
--- /dev/null
+++ b/releasenotes/notes/deprecate-old-api-microversion-fixture-a471aac985c0f3fb.yaml
@@ -0,0 +1,7 @@
+---
+deprecations:
+ - |
+    The old APIMicroversionFixture classes
+    ``tempest.api.compute.api_microversion_fixture.APIMicroversionFixture``
+    and ``tempest.api.volume.api_microversion_fixture.APIMicroversionFixture``
+    have been deprecated for removal in favor of the new location
+    ``tempest.lib.common.api_microversion_fixture.APIMicroversionFixture``.
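
For illustration, a minimal sketch of using the fixture at its new location
follows; the keyword arguments mirror the ``setup_api_microversion_fixture``
call added to tempest/api/compute/base.py in this change, while the test class
and microversion values here are arbitrary examples::

    # Sketch only: prefer the lib fixture over the deprecated per-service
    # classes. Microversion values are arbitrary examples.
    import testtools

    from tempest.lib.common import api_microversion_fixture


    class ExampleMicroversionTest(testtools.TestCase):

        def setUp(self):
            super(ExampleMicroversionTest, self).setUp()
            self.useFixture(api_microversion_fixture.APIMicroversionFixture(
                compute_microversion='2.53',
                volume_microversion='3.0',
                placement_microversion='1.0'))
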
diff --git a/releasenotes/notes/floating-ips-port-forwarding-client-cf8820b910bd7f4d.yaml b/releasenotes/notes/floating-ips-port-forwarding-client-cf8820b910bd7f4d.yaml
new file mode 100644
index 0000000..3aaec69
--- /dev/null
+++ b/releasenotes/notes/floating-ips-port-forwarding-client-cf8820b910bd7f4d.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+    Add a new client to list, create, show information for, update and
+    delete Neutron floating IP port forwarding resources.
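
A rough usage sketch follows; the method and argument names are assumptions
modelled on the Neutron floating IP port forwarding API
(``/floatingips/{fip_id}/port_forwardings``) and should be checked against the
actual client before use::

    # Assumption-heavy sketch: exercise create/show/update/delete on a
    # floating IP port forwarding resource through the new client.
    def exercise_port_forwarding(pf_client, fip_id, internal_port_id,
                                 internal_ip_address):
        created = pf_client.create_port_forwarding(
            fip_id,
            internal_port_id=internal_port_id,
            internal_ip_address=internal_ip_address,
            internal_port=8080,
            external_port=18080,
            protocol='tcp')['port_forwarding']
        pf_client.show_port_forwarding(fip_id, created['id'])
        pf_client.update_port_forwarding(fip_id, created['id'],
                                         external_port=18081)
        pf_client.delete_port_forwarding(fip_id, created['id'])
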
diff --git a/releasenotes/notes/set-default-value-of-concurrency-to-2-d916d5c31e3725d5.yaml b/releasenotes/notes/set-default-value-of-concurrency-to-2-d916d5c31e3725d5.yaml
new file mode 100644
index 0000000..0d964a9
--- /dev/null
+++ b/releasenotes/notes/set-default-value-of-concurrency-to-2-d916d5c31e3725d5.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+    [`bug 1948935 <https://bugs.launchpad.net/tempest/+bug/1948935>`_]
+    The default value of the account-generator ``--concurrency`` parameter
+    is now set to 2 instead of 1.
diff --git a/requirements.txt b/requirements.txt
index c71cabe..c4c7fcc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,7 @@
jsonschema>=3.2.0 # MIT
testtools>=2.2.0 # MIT
paramiko>=2.7.0 # LGPLv2.1+
+cryptography>=2.1 # BSD/Apache-2.0
netaddr>=0.7.18 # BSD
oslo.concurrency>=3.26.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
@@ -20,4 +21,3 @@
PrettyTable>=0.7.1 # BSD
urllib3>=1.21.1 # MIT
debtcollector>=1.2.0 # Apache-2.0
-unittest2>=1.1.0 # BSD
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 0c72b69..1919393 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -81,7 +81,7 @@
.. zuul:rolevar:: stable_constraints_file
:default: ''
- Upper constraints file to be used for stable branch till stable/stein.
+ Upper constraints file to be used for stable branch till stable/train.
.. zuul:rolevar:: tempest_tox_environment
:default: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index a8b3ede..397de1e 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,11 +25,11 @@
target_branch: "{{ zuul.override_checkout }}"
when: zuul.override_checkout is defined
-- name: Use stable branch upper-constraints till stable/stein
+- name: Use stable branch upper-constraints till stable/train
set_fact:
# TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
- when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+ when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train"]
- name: Use Configured upper-constraints for non-master Tempest
set_fact:
@@ -78,6 +78,17 @@
exclude_list_option: "--exclude-list={{ tempest_test_exclude_list|quote }}"
when: exclude_list_stat.stat.exists
+- name: stable/train workaround to fall back from exclude-list to blacklist
+  # NOTE(gmann): stable/train uses Tempest 26.1.0 with stestr 2.5.1
+  # (because of the stestr 2.5.1 upper constraint in stable/train), which
+  # does not have the new --exclude-list argument, so fall back to the old
+  # argument if the new one is passed.
+ set_fact:
+ exclude_list_option: "--blacklist-file={{ tempest_test_exclude_list|quote }}"
+ when:
+ - tempest_test_exclude_list is defined
+ - target_branch == "stable/train"
+
# TODO(kopecmartin) remove this after all consumers of the role have switched
# to tempest_exclude_regex option, until then it's kept here for the backward
# compatibility
@@ -94,6 +105,19 @@
when:
- tempest_black_regex is not defined
- tempest_exclude_regex is defined
+ - target_branch != "stable/train"
+
+- name: stable/train workaround to fall back from exclude-regex to black-regex
+  # NOTE(gmann): stable/train uses Tempest 26.1.0 with stestr 2.5.1
+  # (because of the stestr 2.5.1 upper constraint in stable/train), which
+  # does not have the new --exclude-regex argument, so fall back to the old
+  # argument if the new one is passed.
+ set_fact:
+ tempest_test_exclude_regex: "--black-regex={{tempest_exclude_regex|quote}}"
+ when:
+ - tempest_black_regex is not defined
+ - tempest_exclude_regex is defined
+ - target_branch == "stable/train"
- name: Run Tempest
command: tox -e {{tox_envlist}} {{tox_extra_args}} -- {{tempest_test_regex|quote}} \
diff --git a/setup.cfg b/setup.cfg
index d885db0..a41eccf 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -18,6 +18,7 @@
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
index 4cc5fdd..f54fb22 100644
--- a/tempest/api/compute/admin/test_agents.py
+++ b/tempest/api/compute/admin/test_agents.py
@@ -119,3 +119,5 @@
self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
self.assertNotIn(body['agent_id'], map(lambda x: x['agent_id'],
agents))
+ for agent in agents:
+ self.assertEqual(agent_xen['hypervisor'], agent['hypervisor'])
diff --git a/tempest/api/compute/admin/test_aggregates.py b/tempest/api/compute/admin/test_aggregates.py
index 2716259..a6c6535 100644
--- a/tempest/api/compute/admin/test_aggregates.py
+++ b/tempest/api/compute/admin/test_aggregates.py
@@ -17,6 +17,7 @@
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
@@ -237,6 +238,10 @@
wait_until='ACTIVE')
server_host = self.get_host_for_server(server['id'])
self.assertEqual(host, server_host)
+ self.servers_client.delete_server(server['id'])
+        # NOTE(gmann): We need to wait for the server to be deleted before
+        # addCleanup removes the host from the aggregate.
+ waiters.wait_for_server_termination(self.servers_client, server['id'])
class AggregatesAdminTestV241(AggregatesAdminTestBase):
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 4c531b3..10018fe 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -127,3 +127,34 @@
self.flavor['id'], 'hw:numa_nodes')
self.assertEqual(body['hw:numa_nodes'], '1')
self.assertNotIn('hw:cpu_policy', body)
+
+
+class FlavorMetadataValidation(base.BaseV2ComputeAdminTest):
+
+ min_microversion = '2.86'
+
+ @classmethod
+ def resource_setup(cls):
+ super(FlavorMetadataValidation, cls).resource_setup()
+ cls.flavor_name_prefix = 'test_flavor_validate_metadata_'
+ cls.ram = 512
+ cls.vcpus = 1
+ cls.disk = 10
+ cls.ephemeral = 10
+ cls.swap = 1024
+ cls.rxtx = 2
+
+ @decorators.idempotent_id('d3114f03-b0f2-4dc7-be11-70c0abc178b3')
+ def test_flavor_update_with_custom_namespace(self):
+ """Test flavor creation with a custom namespace, key and value"""
+ flavor_name = data_utils.rand_name(self.flavor_name_prefix)
+ flavor_id = self.create_flavor(ram=self.ram,
+ vcpus=self.vcpus,
+ disk=self.disk,
+ name=flavor_name)['id']
+ specs = {'hw:cpu_policy': 'shared', 'foo:bar': 'baz'}
+ body = self.admin_flavors_client.set_flavor_extra_spec(
+ flavor_id,
+ **specs)['extra_specs']
+ self.assertEqual(body['foo:bar'], 'baz')
+ self.assertEqual(body['hw:cpu_policy'], 'shared')
diff --git a/tempest/api/compute/admin/test_hypervisor.py b/tempest/api/compute/admin/test_hypervisor.py
index 347193d..c7a1201 100644
--- a/tempest/api/compute/admin/test_hypervisor.py
+++ b/tempest/api/compute/admin/test_hypervisor.py
@@ -145,3 +145,26 @@
hypers = self.client.search_hypervisor(
hypers[0]['hypervisor_hostname'])['hypervisors']
self.assertNotEmpty(hypers, "No hypervisors found.")
+
+
+class HypervisorAdminV253TestBase(base.BaseV2ComputeAdminTest):
+ """Tests Hypervisors API above 2.53 that require admin privileges"""
+
+ min_microversion = '2.53'
+
+ @classmethod
+ def setup_clients(cls):
+ super(HypervisorAdminV253TestBase, cls).setup_clients()
+ cls.client = cls.os_admin.hypervisor_client
+
+ @decorators.idempotent_id('4ab54a14-77a2-4e39-b9d2-1306d157c705')
+ def test_list_show_detail_hypervisors(self):
+        """Verify the list, list details, and show hypervisors
+
+        This verifies the hypervisor API response schema with the v2.53
+        microversion.
+ """
+ self.client.list_hypervisors(
+ detail=True, with_servers=True)['hypervisors']
+ hypers = self.client.list_hypervisors(with_servers=True)['hypervisors']
+ self.client.show_hypervisor(
+ hypers[0]['id'], with_servers=True)['hypervisor']
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index cf8c560..549d4fb 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -48,8 +48,8 @@
:param return image_id: The UUID of the newly created image.
"""
- image = self.image_client.show_image(CONF.compute.image_ref)
- image_data = self.image_client.show_image_file(
+ image = self.admin_image_client.show_image(CONF.compute.image_ref)
+ image_data = self.admin_image_client.show_image_file(
CONF.compute.image_ref).data
image_file = io.BytesIO(image_data)
create_dict = {
@@ -60,11 +60,11 @@
'visibility': 'public',
}
create_dict.update(kwargs)
- new_image = self.image_client.create_image(**create_dict)
- self.addCleanup(self.image_client.wait_for_resource_deletion,
+ new_image = self.admin_image_client.create_image(**create_dict)
+ self.addCleanup(self.admin_image_client.wait_for_resource_deletion,
new_image['id'])
- self.addCleanup(self.image_client.delete_image, new_image['id'])
- self.image_client.store_image_file(new_image['id'], image_file)
+ self.addCleanup(self.admin_image_client.delete_image, new_image['id'])
+ self.admin_image_client.store_image_file(new_image['id'], image_file)
return new_image['id']
diff --git a/tempest/api/compute/api_microversion_fixture.py b/tempest/api/compute/api_microversion_fixture.py
index 695af52..1f55a65 100644
--- a/tempest/api/compute/api_microversion_fixture.py
+++ b/tempest/api/compute/api_microversion_fixture.py
@@ -13,14 +13,23 @@
# under the License.
import fixtures
+from oslo_log import log as logging
from tempest.lib.services.compute import base_compute_client
+LOG = logging.getLogger(__name__)
+
class APIMicroversionFixture(fixtures.Fixture):
def __init__(self, compute_microversion):
self.compute_microversion = compute_microversion
+ new_fixture = (
+ 'tempest.lib.common.api_microversion_fixture.'
+ 'APIMicroversionFixture')
+ LOG.warning("%s class is deprecated and moved to %s. It"
+ " will be removed in Z cycle.",
+ self.__class__.__name__, new_fixture)
def _setUp(self):
super(APIMicroversionFixture, self)._setUp()
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 922a14c..a110eb4 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -21,7 +21,6 @@
from tempest.common import waiters
from tempest import config
from tempest import exceptions
-from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_request
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
@@ -164,6 +163,11 @@
api_version_utils.select_request_microversion(
cls.placement_min_microversion,
CONF.placement.min_microversion))
+ cls.setup_api_microversion_fixture(
+ compute_microversion=cls.request_microversion,
+ volume_microversion=cls.volume_request_microversion,
+ placement_microversion=cls.placement_request_microversion)
+
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
cls.image_ref = CONF.compute.image_ref
@@ -483,28 +487,8 @@
:param validation_resources: The dict of validation resources
provisioned for the server.
"""
- if CONF.validation.connect_method == 'floating':
- if validation_resources:
- return validation_resources['floating_ip']['ip']
- else:
- msg = ('When validation.connect_method equals floating, '
- 'validation_resources cannot be None')
- raise lib_exc.InvalidParam(invalid_param=msg)
- elif CONF.validation.connect_method == 'fixed':
- addresses = server['addresses'][CONF.validation.network_for_ssh]
- for address in addresses:
- if address['version'] == CONF.validation.ip_version_for_ssh:
- return address['addr']
- raise exceptions.ServerUnreachable(server_id=server['id'])
- else:
- raise lib_exc.InvalidConfiguration()
-
- def setUp(self):
- super(BaseV2ComputeTest, self).setUp()
- self.useFixture(api_microversion_fixture.APIMicroversionFixture(
- compute_microversion=self.request_microversion,
- volume_microversion=self.volume_request_microversion,
- placement_microversion=self.placement_request_microversion))
+ return compute.get_server_ip(
+ server, validation_resources=validation_resources)
@classmethod
def create_volume(cls, image_ref=None, **kwargs):
@@ -658,7 +642,7 @@
cls.os_admin.availability_zone_client)
cls.admin_flavors_client = cls.os_admin.flavors_client
cls.admin_servers_client = cls.os_admin.servers_client
- cls.image_client = cls.os_admin.image_client_v2
+ cls.admin_image_client = cls.os_admin.image_client_v2
cls.admin_assisted_volume_snapshots_client = \
cls.os_admin.assisted_volume_snapshots_client
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index ac18442..efecd6c 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -68,7 +68,8 @@
self.image_ssh_password,
validation_resources['keypair']['private_key'],
server=server,
- servers_client=self.servers_client)
+ servers_client=self.servers_client,
+ ssh_key_type=CONF.validation.ssh_key_type)
linux_client.validate_authentication()
def _create_server_get_interfaces(self):
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 58d4d7d..f5c9080 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -35,6 +35,8 @@
class DeviceTaggingBase(base.BaseV2ComputeTest):
+ credentials = ['primary', 'admin']
+
@classmethod
def skip_checks(cls):
super(DeviceTaggingBase, cls).skip_checks()
@@ -54,6 +56,7 @@
cls.ports_client = cls.os_primary.ports_client
cls.subnets_client = cls.os_primary.subnets_client
cls.interfaces_client = cls.os_primary.interfaces_client
+ cls.servers_admin_client = cls.os_admin.servers_client
@classmethod
def setup_credentials(cls):
@@ -332,7 +335,9 @@
def verify_device_metadata(self, md_json):
try:
md_dict = json.loads(md_json)
- except (json_decoder.JSONDecodeError, TypeError):
+ except (json_decoder.JSONDecodeError, TypeError) as e:
+ LOG.warning(
+ 'Failed to decode json metadata: %s, %s', str(e), str(md_json))
return False
found_devices = [d['tags'][0] for d in md_dict['devices']
@@ -342,7 +347,9 @@
sorted(found_devices),
sorted(['nic-tag', 'volume-tag']))
return True
- except Exception:
+ except Exception as e:
+ LOG.warning(
+ 'Failed to parse metadata: %s, %s', str(e), str(md_json))
return False
def verify_empty_devices(self, md_json):
@@ -422,11 +429,13 @@
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
- self.interfaces_client.delete_interface(server['id'],
- interface['port_id'])
- waiters.wait_for_interface_detach(self.interfaces_client,
+ req_id = self.interfaces_client.delete_interface(
+ server['id'], interface['port_id']
+ ).response['x-openstack-request-id']
+ waiters.wait_for_interface_detach(self.servers_admin_client,
server['id'],
- interface['port_id'])
+ interface['port_id'],
+ req_id)
# FIXME(mriedem): The assertion that the tagged devices are removed
# from the metadata for the server is being skipped until bug 1775947
# is fixed.
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 152e7e8..c415c00 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -43,6 +43,17 @@
super(ServerActionsTestJSON, self).setUp()
# Check if the server is in a clean state after test
try:
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+            # The _test_rebuild_server test compares the IP addresses
+            # attached to the server before and after the rebuild. To avoid
+            # a situation where a newly created server does not yet have a
+            # floating IP attached at the beginning of test_rebuild_server,
+            # make sure right here that the floating IP is attached.
+ waiters.wait_for_server_floating_ip(
+ self.client,
+ self.client.show_server(self.server_id)['server'],
+ validation_resources['floating_ip'])
waiters.wait_for_server_status(self.client,
self.server_id, 'ACTIVE')
except lib_exc.NotFound:
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 32ccb9e..419c6c7 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -157,4 +157,4 @@
self.addCleanup(self.delete_domain, domain['id'])
expected_data = {'name': d_name, 'enabled': True}
self.assertEqual('', domain['description'])
- self.assertDictContainsSubset(expected_data, domain)
+ self.assertLessEqual(expected_data.items(), domain.items())
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index a649d27..fb3b03e 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -44,7 +44,7 @@
# Verifying response body of create service
expected_data = {'name': name, 'type': serv_type, 'description': desc}
- self.assertDictContainsSubset(expected_data, create_service)
+ self.assertLessEqual(expected_data.items(), create_service.items())
# Update description
s_id = create_service['id']
@@ -61,7 +61,7 @@
resp3_desc = fetched_service['description']
self.assertEqual(resp2_desc, resp3_desc)
- self.assertDictContainsSubset(update_service, fetched_service)
+ self.assertLessEqual(update_service.items(), fetched_service.items())
@decorators.idempotent_id('d1dcb1a1-2b6b-4da8-bbb8-5532ef6e8269')
def test_create_service_without_description(self):
@@ -72,7 +72,7 @@
type=serv_type, name=name)['service']
self.addCleanup(self.services_client.delete_service, service['id'])
expected_data = {'name': name, 'type': serv_type}
- self.assertDictContainsSubset(expected_data, service)
+ self.assertLessEqual(expected_data.items(), service.items())
@decorators.idempotent_id('e55908e8-360e-439e-8719-c3230a3e179e')
def test_list_services(self):
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 5bbd65c..e191979 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -38,14 +38,17 @@
# Create a user.
user_password = data_utils.rand_password()
- user = self.create_test_user(password=user_password)
+ user = self.create_test_user(password=user_password,
+ domain_id=CONF.identity.default_domain_id)
# Create a couple projects
project1_name = data_utils.rand_name(name=self.__class__.__name__)
- project1 = self.setup_test_project(name=project1_name)
+ project1 = self.setup_test_project(
+ name=project1_name, domain_id=CONF.identity.default_domain_id)
project2_name = data_utils.rand_name(name=self.__class__.__name__)
- project2 = self.setup_test_project(name=project2_name)
+ project2 = self.setup_test_project(
+ name=project2_name, domain_id=CONF.identity.default_domain_id)
self.addCleanup(self.projects_client.delete_project, project2['id'])
# Create a role
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 47a8590..696d68d 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -76,6 +76,8 @@
cls.subnetpools_client = cls.os_primary.subnetpools_client
cls.subnets_client = cls.os_primary.subnets_client
cls.ports_client = cls.os_primary.ports_client
+ cls.floating_ips_port_forwarding_client =\
+ cls.os_primary.floating_ips_port_forwarding_client
cls.quotas_client = cls.os_primary.network_quotas_client
cls.floating_ips_client = cls.os_primary.floating_ips_client
cls.security_groups_client = cls.os_primary.security_groups_client
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index d75acfc..532ef65 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -110,6 +110,33 @@
# Delete security group
self._delete_security_group(group_create_body['security_group']['id'])
+ @decorators.idempotent_id('fd1ea1c5-eedc-403f-898d-2b562e853f2e')
+ def test_delete_security_group_clear_associated_rules(self):
+        """Verify deleting a security group.
+
+        When a security group is deleted, its associated security group
+        rules should be deleted as well.
+ """
+ group_create_body, _ = self._create_security_group()
+
+ # Create rules for tcp protocol
+ client = self.security_group_rules_client
+ rule_create_body = client.create_security_group_rule(
+ security_group_id=group_create_body['security_group']['id'],
+ protocol='tcp',
+ direction='ingress',
+ ethertype=self.ethertype
+ )
+ rule_id = rule_create_body['security_group_rule']['id']
+ # Delete security group
+ self._delete_security_group(group_create_body['security_group']['id'])
+
+ # List rules and verify created rule is not in response
+ rule_list_body = (
+ self.security_group_rules_client.list_security_group_rules())
+ rule_list = [rule['id']
+ for rule in rule_list_body['security_group_rules']]
+ self.assertNotIn(rule_id, rule_list)
+
@decorators.attr(type='smoke')
@decorators.idempotent_id('cfb99e0e-7410-4a3d-8a0c-959a63ee77e9')
def test_create_show_delete_security_group_rule(self):
diff --git a/tempest/api/volume/admin/test_group_snapshots.py b/tempest/api/volume/admin/test_group_snapshots.py
index ddfc78a..73903cf 100644
--- a/tempest/api/volume/admin/test_group_snapshots.py
+++ b/tempest/api/volume/admin/test_group_snapshots.py
@@ -256,7 +256,6 @@
volume_max_microversion = 'latest'
@decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
- @decorators.skip_because(bug='1770179')
def test_reset_group_snapshot_status(self):
"""Test resetting group snapshot status to creating/available/error"""
# Create volume type
diff --git a/tempest/api/volume/admin/test_group_type_specs.py b/tempest/api/volume/admin/test_group_type_specs.py
index 63c3546..181926e 100644
--- a/tempest/api/volume/admin/test_group_type_specs.py
+++ b/tempest/api/volume/admin/test_group_type_specs.py
@@ -73,10 +73,11 @@
self.assertEqual(list_specs, body)
# Delete specified item of group type specs
- delete_key = 'key1'
- self.admin_group_types_client.delete_group_type_specs_item(
- group_type['id'], delete_key)
- self.assertRaises(
- lib_exc.NotFound,
- self.admin_group_types_client.show_group_type_specs_item,
- group_type['id'], delete_key)
+ delete_keys = ['key1', 'key2', 'key3']
+ for it in delete_keys:
+ self.admin_group_types_client.delete_group_type_specs_item(
+ group_type['id'], it)
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.admin_group_types_client.show_group_type_specs_item,
+ group_type['id'], it)
diff --git a/tempest/api/volume/admin/test_group_types.py b/tempest/api/volume/admin/test_group_types.py
index 97455f3..8154682 100644
--- a/tempest/api/volume/admin/test_group_types.py
+++ b/tempest/api/volume/admin/test_group_types.py
@@ -25,8 +25,8 @@
volume_max_microversion = 'latest'
@decorators.idempotent_id('dd71e5f9-393e-4d4f-90e9-fa1b8d278864')
- def test_group_type_create_list_update_show(self):
- """Test create/list/update/show group type"""
+ def test_group_type_create_list_update_show_delete(self):
+ """Test create/list/update/show/delete group type"""
name = data_utils.rand_name(self.__class__.__name__ + '-group-type')
description = data_utils.rand_name("group-type-description")
group_specs = {"consistent_group_snapshot_enabled": "<is> False"}
@@ -34,7 +34,8 @@
'description': description,
'group_specs': group_specs,
'is_public': True}
- body = self.create_group_type(**params)
+ body = self.admin_group_types_client.create_group_type(
+ **params)['group_type']
self.assertIn('name', body)
err_msg = ("The created group_type %(var)s is not equal to the "
"requested %(var)s")
@@ -64,3 +65,9 @@
self.assertEqual(params[key], fetched_group_type[key],
'%s of the fetched group_type is different '
'from the created group_type' % key)
+
+ self.admin_group_types_client.delete_group_type(body['id'])
+ group_list = (
+ self.admin_group_types_client.list_group_types()['group_types'])
+ group_ids = [it['id'] for it in group_list]
+ self.assertNotIn(body['id'], group_ids)
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 5ab8e87..6b58189 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -87,7 +87,7 @@
# test that the specific values we set are actually in
# the final result. There is nothing here that ensures there
# would be no other values in there.
- self.assertDictContainsSubset(new_quota_set, quota_set)
+ self.assertLessEqual(new_quota_set.items(), quota_set.items())
@decorators.idempotent_id('18c51ae9-cb03-48fc-b234-14a19374dbed')
def test_show_quota_usage(self):
diff --git a/tempest/api/volume/api_microversion_fixture.py b/tempest/api/volume/api_microversion_fixture.py
index 7bbe674..219fde8 100644
--- a/tempest/api/volume/api_microversion_fixture.py
+++ b/tempest/api/volume/api_microversion_fixture.py
@@ -12,14 +12,23 @@
# under the License.
import fixtures
+from oslo_log import log as logging
from tempest.lib.services.volume import base_client
+LOG = logging.getLogger(__name__)
+
class APIMicroversionFixture(fixtures.Fixture):
def __init__(self, volume_microversion):
self.volume_microversion = volume_microversion
+ new_fixture = (
+ 'tempest.lib.common.api_microversion_fixture.'
+ 'APIMicroversionFixture')
+ LOG.warning("%s class is deprecated and moved to %s. It"
+ " will be removed in Z cycle.",
+ self.__class__.__name__, new_fixture)
def _setUp(self):
super(APIMicroversionFixture, self)._setUp()
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 6e34dd6..b90b5bb 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -16,7 +16,6 @@
from tempest.common import compute
from tempest.common import waiters
from tempest import config
-from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
@@ -75,12 +74,6 @@
cls.os_primary.volume_availability_zone_client_latest)
cls.volume_limits_client = cls.os_primary.volume_limits_client_latest
- def setUp(self):
- super(BaseVolumeTest, self).setUp()
- self.useFixture(api_microversion_fixture.APIMicroversionFixture(
- compute_microversion=self.compute_request_microversion,
- volume_microversion=self.volume_request_microversion))
-
@classmethod
def resource_setup(cls):
super(BaseVolumeTest, cls).resource_setup()
@@ -92,6 +85,9 @@
api_version_utils.select_request_microversion(
cls.min_microversion,
CONF.compute.min_microversion))
+ cls.setup_api_microversion_fixture(
+ compute_microversion=cls.compute_request_microversion,
+ volume_microversion=cls.volume_request_microversion)
cls.image_ref = CONF.compute.image_ref
cls.flavor_ref = CONF.compute.flavor_ref
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index fd2e7c4..a58da7e 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -167,7 +167,8 @@
@decorators.idempotent_id('677863d1-3142-456d-b6ac-9924f667a7f4')
def test_volume_from_snapshot(self):
"""Test creating volume from snapshot with extending size"""
- self._create_volume_from_snapshot(extra_size=1)
+ self._create_volume_from_snapshot(
+ extra_size=CONF.volume.volume_size_extend)
@decorators.idempotent_id('053d8870-8282-4fff-9dbb-99cb58bb5e0a')
def test_volume_from_snapshot_no_size(self):
diff --git a/tempest/clients.py b/tempest/clients.py
index 6a25997..4c3d875 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -59,6 +59,8 @@
self.ports_client = self.network.PortsClient()
self.network_quotas_client = self.network.QuotasClient()
self.floating_ips_client = self.network.FloatingIPsClient()
+ self.floating_ips_port_forwarding_client =\
+ self.network.FloatingIpsPortForwardingClient()
self.metering_labels_client = self.network.MeteringLabelsClient()
self.metering_label_rules_client = (
self.network.MeteringLabelRulesClient())
@@ -72,6 +74,8 @@
self.qos_client = self.network.QosClient()
self.qos_min_bw_client = self.network.QosMinimumBandwidthRulesClient()
self.qos_limit_bw_client = self.network.QosLimitBandwidthRulesClient()
+ self.qos_min_pps_client = (
+ self.network.QosMinimumPacketRateRulesClient())
self.segments_client = self.network.SegmentsClient()
self.trunks_client = self.network.TrunksClient()
self.log_resource_client = self.network.LogResourceClient()
@@ -114,7 +118,8 @@
self.server_groups_client = self.compute.ServerGroupsClient()
self.limits_client = self.compute.LimitsClient()
self.compute_images_client = self.compute.ImagesClient()
- self.keypairs_client = self.compute.KeyPairsClient()
+ self.keypairs_client = self.compute.KeyPairsClient(
+ ssh_key_type=CONF.validation.ssh_key_type)
self.quotas_client = self.compute.QuotasClient()
self.quota_classes_client = self.compute.QuotaClassesClient()
self.flavors_client = self.compute.FlavorsClient()
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index 917262e..ad0b547 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -81,11 +81,11 @@
will have the prefix with the given TAG in its name. Using tag is recommended
for the further using, cleaning resources.
-* ``-r, --concurrency CONCURRENCY`` (Optional) Concurrency count
- (default: 1). The number of accounts required can be estimated as
- CONCURRENCY x 2. Each user provided in *accounts.yaml* file will be in
- a different tenant. This is required to provide isolation between test for
- running in parallel.
+* ``-r, --concurrency CONCURRENCY`` (Optional) Concurrency count (default: 2).
+  The number of accounts generated will be the same as CONCURRENCY. The
+  higher the number, the more tests can run in parallel. If you want to run
+  tests sequentially, use 1 as the concurrency value (beware that tests that
+  need more credentials will fail).
* ``--with-admin`` (Optional) Creates admin for each concurrent group
(default: False).
@@ -236,7 +236,7 @@
dest='tag',
help='Resources tag')
parser.add_argument('-r', '--concurrency',
- default=1,
+ default=2,
type=positive_int,
required=False,
dest='concurrency',
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index a062f6f..d34cd6d 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -25,9 +25,11 @@
from tempest.common import waiters
from tempest import config
+from tempest import exceptions
from tempest.lib.common import fixed_network
from tempest.lib.common import rest_client
from tempest.lib.common.utils import data_utils
+from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -54,10 +56,37 @@
return False
+def get_server_ip(server, validation_resources=None):
+ """Get the server fixed or floating IP.
+
+    Based on the current configuration, return the correct IP
+    address for validating that a guest is up.
+
+ :param server: The server dict as returned by the API
+ :param validation_resources: The dict of validation resources
+ provisioned for the server.
+ """
+ if CONF.validation.connect_method == 'floating':
+ if validation_resources:
+ return validation_resources['floating_ip']['ip']
+ else:
+ msg = ('When validation.connect_method equals floating, '
+ 'validation_resources cannot be None')
+ raise lib_exc.InvalidParam(invalid_param=msg)
+ elif CONF.validation.connect_method == 'fixed':
+ addresses = server['addresses'][CONF.validation.network_for_ssh]
+ for address in addresses:
+ if address['version'] == CONF.validation.ip_version_for_ssh:
+ return address['addr']
+ raise exceptions.ServerUnreachable(server_id=server['id'])
+ else:
+ raise lib_exc.InvalidConfiguration()
+
+
def create_test_server(clients, validatable=False, validation_resources=None,
tenant_network=None, wait_until=None,
volume_backed=False, name=None, flavor=None,
- image_id=None, wait_for_sshable=True, **kwargs):
+ image_id=None, **kwargs):
"""Common wrapper utility returning a test server.
This method is a common wrapper returning a test server that can be
@@ -93,8 +122,6 @@
CONF.compute.flavor_ref will be used instead.
:param image_id: ID of the image to be used to provision the server. If not
defined, CONF.compute.image_ref will be used instead.
- :param wait_for_sshable: Check server's console log and wait until it will
- be ready to login.
:returns: a tuple
"""
@@ -197,6 +224,7 @@
body = clients.servers_client.create_server(name=name, imageRef=image_id,
flavorRef=flavor,
**kwargs)
+ request_id = body.response['x-openstack-request-id']
# handle the case of multiple servers
if multiple_create_request:
@@ -234,7 +262,8 @@
for server in servers:
try:
waiters.wait_for_server_status(
- clients.servers_client, server['id'], wait_until)
+ clients.servers_client, server['id'], wait_until,
+ request_id=request_id)
# Multiple validatable servers are not supported for now. Their
# creation will fail with the condition above.
@@ -265,10 +294,6 @@
LOG.exception('Server %s failed to delete in time',
server['id'])
- if (validatable and CONF.compute_feature_enabled.console_output and
- wait_for_sshable):
- waiters.wait_for_guest_os_boot(clients.servers_client, server['id'])
-
return body, servers
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 5d6e129..9d9fab7 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -48,7 +48,8 @@
console_output_enabled=CONF.compute_feature_enabled.console_output,
ssh_shell_prologue=CONF.validation.ssh_shell_prologue,
ping_count=CONF.validation.ping_count,
- ping_size=CONF.validation.ping_size)
+ ping_size=CONF.validation.ping_size,
+ ssh_key_type=CONF.validation.ssh_key_type)
# Note that this method will not work on SLES11 guests, as they do
# not support the TYPE column on lsblk
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index f6a4555..fbc8698 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -32,7 +32,8 @@
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
- extra_timeout=0, raise_on_error=True):
+ extra_timeout=0, raise_on_error=True,
+ request_id=None):
"""Waits for a server to reach a given status."""
# NOTE(afazekas): UNKNOWN status possible on ERROR
@@ -71,11 +72,12 @@
'/'.join((server_status, str(task_state))),
time.time() - start_time)
if (server_status == 'ERROR') and raise_on_error:
+ details = ''
if 'fault' in body:
- raise exceptions.BuildErrorException(body['fault'],
- server_id=server_id)
- else:
- raise exceptions.BuildErrorException(server_id=server_id)
+ details += 'Fault: %s.' % body['fault']
+ if request_id:
+ details += ' Server boot request ID: %s.' % request_id
+ raise exceptions.BuildErrorException(details, server_id=server_id)
timed_out = int(time.time()) - start_time >= timeout
@@ -88,6 +90,8 @@
'status': status,
'expected_task_state': expected_task_state,
'timeout': timeout})
+ if request_id:
+ message += ' Server boot request ID: %s.' % request_id
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
caller = test_utils.find_test_caller()
@@ -489,18 +493,34 @@
return body
-def wait_for_interface_detach(client, server_id, port_id):
+def wait_for_interface_detach(client, server_id, port_id, detach_request_id):
"""Waits for an interface to be detached from a server."""
- body = client.list_interfaces(server_id)['interfaceAttachments']
- ports = [iface['port_id'] for iface in body]
+ def _get_detach_event_results():
+ # NOTE(gibi): The obvious choice for this waiter would be to wait
+ # until the interface disappears from the client.list_interfaces()
+        # response. However, that response is based on the binding status
+        # of the port in Neutron, and Nova deallocates the port resources
+        # only _after_ the port has been unbound in Neutron. A naive waiter
+        # could therefore return before the port is fully deallocated.
+        # Instead, wait for the os-instance-action to succeed, as that is
+        # recorded only after the port is fully deallocated.
+ events = client.show_instance_action(
+ server_id, detach_request_id)['instanceAction'].get('events', [])
+ return [
+ event['result'] for event in events
+ if event['event'] == 'compute_detach_interface'
+ ]
+
+ detach_event_results = _get_detach_event_results()
+
start = int(time.time())
- while port_id in ports:
+ while "Success" not in detach_event_results:
time.sleep(client.build_interval)
- body = client.list_interfaces(server_id)['interfaceAttachments']
- ports = [iface['port_id'] for iface in body]
- if port_id not in ports:
- return body
+ detach_event_results = _get_detach_event_results()
+ if "Success" in detach_event_results:
+ return client.show_instance_action(
+ server_id, detach_request_id)['instanceAction']
timed_out = int(time.time()) - start >= client.build_timeout
if timed_out:
@@ -510,18 +530,43 @@
raise lib_exc.TimeoutException(message)
-def wait_for_guest_os_boot(client, server_id):
+def wait_for_server_floating_ip(servers_client, server, floating_ip,
+ wait_for_disassociate=False):
+ """Wait for floating IP association or disassociation.
+
+ :param servers_client: The servers client to use when querying the server's
+ floating IPs.
+ :param server: The server JSON dict on which to wait.
+ :param floating_ip: The floating IP JSON dict on which to wait.
+    :param wait_for_disassociate: Boolean indicating whether to wait for
+ disassociation instead of association.
+ """
+
+ def _get_floating_ip_in_server_addresses(floating_ip, server):
+ for addresses in server['addresses'].values():
+ for address in addresses:
+ if (
+ address['OS-EXT-IPS:type'] == 'floating' and
+ address['addr'] == floating_ip['floating_ip_address']
+ ):
+ return address
+ return None
+
start_time = int(time.time())
while True:
- console_output = client.get_console_output(server_id)['output']
- for line in console_output.split('\n'):
- if 'login:' in line.lower():
- return
- if int(time.time()) - start_time >= client.build_timeout:
- LOG.info("Guest OS on server %s probably isn't ready or its "
- "console log can't be parsed properly. If guest OS "
- "isn't ready, that may cause problems with SSH to "
- "the server.",
- server_id)
- return
- time.sleep(client.build_interval)
+ server = servers_client.show_server(server['id'])['server']
+ address = _get_floating_ip_in_server_addresses(floating_ip, server)
+ if address is None and wait_for_disassociate:
+ return None
+ if not wait_for_disassociate and address:
+ return address
+
+ if int(time.time()) - start_time >= servers_client.build_timeout:
+ if wait_for_disassociate:
+ msg = ('Floating ip %s failed to disassociate from server %s '
+ 'in time.' % (floating_ip, server['id']))
+ else:
+ msg = ('Floating ip %s failed to associate with server %s '
+ 'in time.' % (floating_ip, server['id']))
+ raise lib_exc.TimeoutException(msg)
+ time.sleep(servers_client.build_interval)
diff --git a/tempest/config.py b/tempest/config.py
index 662a249..03ddbf5 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -875,7 +875,10 @@
'bandwidth allocation.'),
cfg.StrOpt('provider_net_base_segmentation_id', default=3000,
help='Base segmentation ID to create provider networks. '
- 'This value will be increased in case of conflict.')
+ 'This value will be increased in case of conflict.'),
+ cfg.BoolOpt('qos_min_bw_and_pps', default=False,
+ help='Does the test environment have minimum bandwidth and '
+ 'packet rate inventories configured?'),
]
dashboard_group = cfg.OptGroup(name="dashboard",
@@ -967,6 +970,10 @@
default='public',
help="Network used for SSH connections. Ignored if "
"connect_method=floating."),
+ cfg.StrOpt('ssh_key_type',
+ default='rsa',
+ help='Type of key to use for ssh connections. '
+ 'Valid types are rsa, ecdsa'),
]
volume_group = cfg.OptGroup(name='volume',
diff --git a/tempest/lib/api_schema/response/compute/v2_16/servers.py b/tempest/lib/api_schema/response/compute/v2_16/servers.py
index fc81ff7..dcd64cf 100644
--- a/tempest/lib/api_schema/response/compute/v2_16/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_16/servers.py
@@ -171,3 +171,4 @@
attach_volume = copy.deepcopy(servers.attach_volume)
show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_19/servers.py b/tempest/lib/api_schema/response/compute/v2_19/servers.py
index b6c3c14..0e4bd5c 100644
--- a/tempest/lib/api_schema/response/compute/v2_19/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_19/servers.py
@@ -61,3 +61,4 @@
attach_volume = copy.deepcopy(serversv216.attach_volume)
show_volume_attachment = copy.deepcopy(serversv216.show_volume_attachment)
list_volume_attachments = copy.deepcopy(serversv216.list_volume_attachments)
+show_instance_action = copy.deepcopy(serversv216.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_26/servers.py b/tempest/lib/api_schema/response/compute/v2_26/servers.py
index 5a0f987..74c08f1 100644
--- a/tempest/lib/api_schema/response/compute/v2_26/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_26/servers.py
@@ -104,3 +104,4 @@
attach_volume = copy.deepcopy(servers219.attach_volume)
show_volume_attachment = copy.deepcopy(servers219.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers219.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers219.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_3/servers.py b/tempest/lib/api_schema/response/compute/v2_3/servers.py
index 1674c1b..435e3ac 100644
--- a/tempest/lib/api_schema/response/compute/v2_3/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_3/servers.py
@@ -176,3 +176,4 @@
attach_volume = copy.deepcopy(servers.attach_volume)
show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_33/__init__.py b/tempest/lib/api_schema/response/compute/v2_33/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_33/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_33/hypervisors.py b/tempest/lib/api_schema/response/compute/v2_33/hypervisors.py
new file mode 100644
index 0000000..9773605
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_33/hypervisors.py
@@ -0,0 +1,53 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.compute.v2_28 \
+ import hypervisors as hypervisorsv228
+
+###########################################################################
+#
+# 2.33:
+#
+# hypervisors_links parameter is added to the response body for the following
+# APIs:
+#
+# - GET /os-hypervisors
+# - GET /os-hypervisors/detail
+###########################################################################
+list_search_hypervisors = copy.deepcopy(
+ hypervisorsv228.list_search_hypervisors)
+list_search_hypervisors['response_body']['properties'].update(
+ {'hypervisors_links': parameter_types.links}
+)
+
+list_hypervisors_detail = copy.deepcopy(
+ hypervisorsv228.list_hypervisors_detail)
+list_hypervisors_detail['response_body']['properties'].update(
+ {'hypervisors_links': parameter_types.links}
+)
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.28 ***
+get_hypervisor = copy.deepcopy(hypervisorsv228.get_hypervisor)
+hypervisor_detail = copy.deepcopy(hypervisorsv228.hypervisor_detail)
+get_hypervisor_statistics = \
+ copy.deepcopy(hypervisorsv228.get_hypervisor_statistics)
+get_hypervisor_uptime = copy.deepcopy(hypervisorsv228.get_hypervisor_uptime)
+get_hypervisors_servers = copy.deepcopy(
+ hypervisorsv228.get_hypervisors_servers)
diff --git a/tempest/lib/api_schema/response/compute/v2_47/servers.py b/tempest/lib/api_schema/response/compute/v2_47/servers.py
index d580f2c..7050602 100644
--- a/tempest/lib/api_schema/response/compute/v2_47/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_47/servers.py
@@ -69,3 +69,4 @@
attach_volume = copy.deepcopy(servers226.attach_volume)
show_volume_attachment = copy.deepcopy(servers226.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers226.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers226.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_48/servers.py b/tempest/lib/api_schema/response/compute/v2_48/servers.py
index e2e45bc..af6344b 100644
--- a/tempest/lib/api_schema/response/compute/v2_48/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_48/servers.py
@@ -132,3 +132,4 @@
attach_volume = copy.deepcopy(servers247.attach_volume)
show_volume_attachment = copy.deepcopy(servers247.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers247.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers247.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_51/__init__.py b/tempest/lib/api_schema/response/compute/v2_51/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_51/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_51/servers.py b/tempest/lib/api_schema/response/compute/v2_51/servers.py
new file mode 100644
index 0000000..e603287
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_51/servers.py
@@ -0,0 +1,42 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_48 import servers as servers248
+
+# microversion 2.51 made events a mandatory field in the response
+show_instance_action = copy.deepcopy(servers248.show_instance_action)
+show_instance_action['response_body'][
+ 'properties']['instanceAction']['required'].append('events')
+
+
+# Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+list_servers = copy.deepcopy(servers248.list_servers)
+show_server_diagnostics = copy.deepcopy(servers248.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers248.get_remote_consoles)
+list_tags = copy.deepcopy(servers248.list_tags)
+update_all_tags = copy.deepcopy(servers248.update_all_tags)
+delete_all_tags = copy.deepcopy(servers248.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers248.check_tag_existence)
+update_tag = copy.deepcopy(servers248.update_tag)
+delete_tag = copy.deepcopy(servers248.delete_tag)
+get_server = copy.deepcopy(servers248.get_server)
+list_servers_detail = copy.deepcopy(servers248.list_servers_detail)
+update_server = copy.deepcopy(servers248.update_server)
+rebuild_server = copy.deepcopy(servers248.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers248.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers248.attach_volume)
+show_volume_attachment = copy.deepcopy(servers248.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers248.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_53/hypervisors.py b/tempest/lib/api_schema/response/compute/v2_53/hypervisors.py
new file mode 100644
index 0000000..e172f1f
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_53/hypervisors.py
@@ -0,0 +1,68 @@
+# Copyright 2018 ZTE Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_33 \
+ import hypervisors as hypervisorsv233
+
+###########################################################################
+#
+# 2.53:
+#
+# servers parameter is added to the response body for the following
+# APIs:
+#
+# - GET /os-hypervisor
+# - GET /os-hypervisors
+# - GET /os-hypervisors/detail
+#
+###########################################################################
+
+servers = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'uuid': {'type': 'string', 'format': 'uuid'},
+ 'name': {'type': 'string'},
+ },
+ 'additionalProperties': False,
+ },
+}
+
+hypervisor_detail = copy.deepcopy(hypervisorsv233.hypervisor_detail)
+hypervisor_detail['properties'].update({'servers': servers})
+get_hypervisor = copy.deepcopy(hypervisorsv233.get_hypervisor)
+get_hypervisor['response_body']['properties'].update(
+ {'hypervisor': hypervisor_detail})
+list_hypervisors_detail = copy.deepcopy(
+ hypervisorsv233.list_hypervisors_detail)
+list_hypervisors_detail['response_body']['properties']['hypervisors'].update(
+ {'items': hypervisor_detail})
+
+list_search_hypervisors = copy.deepcopy(
+ hypervisorsv233.list_search_hypervisors)
+list_search_hypervisors['response_body']['properties']['hypervisors'][
+ 'items']['properties'].update({'servers': servers})
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.33 ***
+get_hypervisor_statistics = \
+ copy.deepcopy(hypervisorsv233.get_hypervisor_statistics)
+get_hypervisor_uptime = copy.deepcopy(hypervisorsv233.get_hypervisor_uptime)
+get_hypervisors_servers = copy.deepcopy(
+ hypervisorsv233.get_hypervisors_servers)
diff --git a/tempest/lib/api_schema/response/compute/v2_54/servers.py b/tempest/lib/api_schema/response/compute/v2_54/servers.py
index 2c2bff0..135b381 100644
--- a/tempest/lib/api_schema/response/compute/v2_54/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_54/servers.py
@@ -12,7 +12,7 @@
import copy
-from tempest.lib.api_schema.response.compute.v2_48 import servers as servers248
+from tempest.lib.api_schema.response.compute.v2_51 import servers as servers251
# ****** Schemas changed in microversion 2.54 *****************
# Note(gmann): This is schema for microversion 2.54 which includes the
@@ -26,14 +26,14 @@
]
}
-rebuild_server = copy.deepcopy(servers248.rebuild_server)
+rebuild_server = copy.deepcopy(servers251.rebuild_server)
rebuild_server['response_body']['properties']['server'][
'properties'].update({'key_name': key_name})
rebuild_server['response_body']['properties']['server'][
'required'].append('key_name')
rebuild_server_with_admin_pass = copy.deepcopy(
- servers248.rebuild_server_with_admin_pass)
+ servers251.rebuild_server_with_admin_pass)
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'key_name': key_name})
rebuild_server_with_admin_pass['response_body']['properties']['server'][
@@ -43,18 +43,19 @@
# to keep this schema in this file to have the generic way to select the
# right schema based on self.schema_versions_info mapping in service client.
# ****** Schemas unchanged in microversion 2.54 since microversion 2.48 ***
-get_server = copy.deepcopy(servers248.get_server)
-list_servers_detail = copy.deepcopy(servers248.list_servers_detail)
-update_server = copy.deepcopy(servers248.update_server)
-list_servers = copy.deepcopy(servers248.list_servers)
-show_server_diagnostics = copy.deepcopy(servers248.show_server_diagnostics)
-get_remote_consoles = copy.deepcopy(servers248.get_remote_consoles)
-list_tags = copy.deepcopy(servers248.list_tags)
-update_all_tags = copy.deepcopy(servers248.update_all_tags)
-delete_all_tags = copy.deepcopy(servers248.delete_all_tags)
-check_tag_existence = copy.deepcopy(servers248.check_tag_existence)
-update_tag = copy.deepcopy(servers248.update_tag)
-delete_tag = copy.deepcopy(servers248.delete_tag)
-attach_volume = copy.deepcopy(servers248.attach_volume)
-show_volume_attachment = copy.deepcopy(servers248.show_volume_attachment)
-list_volume_attachments = copy.deepcopy(servers248.list_volume_attachments)
+get_server = copy.deepcopy(servers251.get_server)
+list_servers_detail = copy.deepcopy(servers251.list_servers_detail)
+update_server = copy.deepcopy(servers251.update_server)
+list_servers = copy.deepcopy(servers251.list_servers)
+show_server_diagnostics = copy.deepcopy(servers251.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers251.get_remote_consoles)
+list_tags = copy.deepcopy(servers251.list_tags)
+update_all_tags = copy.deepcopy(servers251.update_all_tags)
+delete_all_tags = copy.deepcopy(servers251.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers251.check_tag_existence)
+update_tag = copy.deepcopy(servers251.update_tag)
+delete_tag = copy.deepcopy(servers251.delete_tag)
+attach_volume = copy.deepcopy(servers251.attach_volume)
+show_volume_attachment = copy.deepcopy(servers251.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers251.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers251.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_57/servers.py b/tempest/lib/api_schema/response/compute/v2_57/servers.py
index aa57d25..bdff74b 100644
--- a/tempest/lib/api_schema/response/compute/v2_57/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_57/servers.py
@@ -62,3 +62,4 @@
attach_volume = copy.deepcopy(servers254.attach_volume)
show_volume_attachment = copy.deepcopy(servers254.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers254.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers254.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_58/__init__.py b/tempest/lib/api_schema/response/compute/v2_58/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_58/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_58/servers.py b/tempest/lib/api_schema/response/compute/v2_58/servers.py
new file mode 100644
index 0000000..62239cf
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_58/servers.py
@@ -0,0 +1,44 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+
+# microversion 2.58 added updated_at to the response
+show_instance_action = copy.deepcopy(servers257.show_instance_action)
+show_instance_action['response_body']['properties']['instanceAction'][
+ 'properties']['updated_at'] = parameter_types.date_time
+show_instance_action['response_body']['properties']['instanceAction'][
+ 'required'].append('updated_at')
+
+# Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+list_servers = copy.deepcopy(servers257.list_servers)
+show_server_diagnostics = copy.deepcopy(servers257.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers257.get_remote_consoles)
+list_tags = copy.deepcopy(servers257.list_tags)
+update_all_tags = copy.deepcopy(servers257.update_all_tags)
+delete_all_tags = copy.deepcopy(servers257.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers257.check_tag_existence)
+update_tag = copy.deepcopy(servers257.update_tag)
+delete_tag = copy.deepcopy(servers257.delete_tag)
+get_server = copy.deepcopy(servers257.get_server)
+list_servers_detail = copy.deepcopy(servers257.list_servers_detail)
+update_server = copy.deepcopy(servers257.update_server)
+rebuild_server = copy.deepcopy(servers257.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers257.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers257.attach_volume)
+show_volume_attachment = copy.deepcopy(servers257.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers257.list_volume_attachments)
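Each new microversion module follows the same copy-then-extend recipe seen here: deep-copy the previous module's schema, add what changed, and mark it required, so the older module stays untouched. A standalone sketch of that pattern with a toy schema (not the real v2.57 one):

    import copy

    # Toy stand-in for the previous microversion's schema module.
    base_show_action = {
        'response_body': {
            'type': 'object',
            'properties': {
                'instanceAction': {
                    'type': 'object',
                    'properties': {'action': {'type': 'string'}},
                    'required': ['action'],
                },
            },
        },
    }

    # "Next microversion": copy, add the new field, make it mandatory.
    show_action = copy.deepcopy(base_show_action)
    action = show_action['response_body']['properties']['instanceAction']
    action['properties']['updated_at'] = {'type': 'string',
                                          'format': 'date-time'}
    action['required'].append('updated_at')

    # The deep copy keeps the base schema unchanged.
    assert 'updated_at' not in base_show_action[
        'response_body']['properties']['instanceAction']['properties']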
diff --git a/tempest/lib/api_schema/response/compute/v2_6/servers.py b/tempest/lib/api_schema/response/compute/v2_6/servers.py
index 922bf79..6103b7c 100644
--- a/tempest/lib/api_schema/response/compute/v2_6/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_6/servers.py
@@ -31,6 +31,7 @@
attach_volume = copy.deepcopy(servers.attach_volume)
show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
# NOTE: The consolidated remote console API got introduced with v2.6
# with bp/consolidate-console-api. See Nova commit 578bafeda
diff --git a/tempest/lib/api_schema/response/compute/v2_62/__init__.py b/tempest/lib/api_schema/response/compute/v2_62/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_62/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_62/servers.py b/tempest/lib/api_schema/response/compute/v2_62/servers.py
new file mode 100644
index 0000000..23eebbb
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_62/servers.py
@@ -0,0 +1,47 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_58 import servers as servers258
+
+# microversion 2.62 added hostId and host to the event, but only hostId is
+# mandatory
+show_instance_action = copy.deepcopy(servers258.show_instance_action)
+show_instance_action['response_body']['properties']['instanceAction'][
+ 'properties']['events']['items'][
+ 'properties']['hostId'] = {'type': 'string'}
+show_instance_action['response_body']['properties']['instanceAction'][
+ 'properties']['events']['items']['properties']['host'] = {'type': 'string'}
+show_instance_action['response_body']['properties']['instanceAction'][
+ 'properties']['events']['items']['required'].append('hostId')
+
+# Below are the unchanged schema in this microversion. We need
+# to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+list_servers = copy.deepcopy(servers258.list_servers)
+show_server_diagnostics = copy.deepcopy(servers258.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers258.get_remote_consoles)
+list_tags = copy.deepcopy(servers258.list_tags)
+update_all_tags = copy.deepcopy(servers258.update_all_tags)
+delete_all_tags = copy.deepcopy(servers258.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers258.check_tag_existence)
+update_tag = copy.deepcopy(servers258.update_tag)
+delete_tag = copy.deepcopy(servers258.delete_tag)
+get_server = copy.deepcopy(servers258.get_server)
+list_servers_detail = copy.deepcopy(servers258.list_servers_detail)
+update_server = copy.deepcopy(servers258.update_server)
+rebuild_server = copy.deepcopy(servers258.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers258.rebuild_server_with_admin_pass)
+attach_volume = copy.deepcopy(servers258.attach_volume)
+show_volume_attachment = copy.deepcopy(servers258.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers258.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_63/servers.py b/tempest/lib/api_schema/response/compute/v2_63/servers.py
index 01910aa..db713b1 100644
--- a/tempest/lib/api_schema/response/compute/v2_63/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_63/servers.py
@@ -12,7 +12,7 @@
import copy
-from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+from tempest.lib.api_schema.response.compute.v2_62 import servers as servers262
# Nova microversion 2.63 adds 'trusted_image_certificates' (a list of
# certificate IDs) to the server rebuild and servers details responses.
@@ -29,32 +29,32 @@
}
}
-list_servers_detail = copy.deepcopy(servers257.list_servers_detail)
+list_servers_detail = copy.deepcopy(servers262.list_servers_detail)
list_servers_detail['response_body']['properties']['servers']['items'][
'properties'].update({'trusted_image_certificates': trusted_certs})
list_servers_detail['response_body']['properties']['servers']['items'][
'required'].append('trusted_image_certificates')
-rebuild_server = copy.deepcopy(servers257.rebuild_server)
+rebuild_server = copy.deepcopy(servers262.rebuild_server)
rebuild_server['response_body']['properties']['server'][
'properties'].update({'trusted_image_certificates': trusted_certs})
rebuild_server['response_body']['properties']['server'][
'required'].append('trusted_image_certificates')
rebuild_server_with_admin_pass = copy.deepcopy(
- servers257.rebuild_server_with_admin_pass)
+ servers262.rebuild_server_with_admin_pass)
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'trusted_image_certificates': trusted_certs})
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('trusted_image_certificates')
-update_server = copy.deepcopy(servers257.update_server)
+update_server = copy.deepcopy(servers262.update_server)
update_server['response_body']['properties']['server'][
'properties'].update({'trusted_image_certificates': trusted_certs})
update_server['response_body']['properties']['server'][
'required'].append('trusted_image_certificates')
-get_server = copy.deepcopy(servers257.get_server)
+get_server = copy.deepcopy(servers262.get_server)
get_server['response_body']['properties']['server'][
'properties'].update({'trusted_image_certificates': trusted_certs})
get_server['response_body']['properties']['server'][
@@ -64,15 +64,16 @@
# to keep this schema in this file to have the generic way to select the
# right schema based on self.schema_versions_info mapping in service client.
# ****** Schemas unchanged since microversion 2.57 ***
-list_servers = copy.deepcopy(servers257.list_servers)
-show_server_diagnostics = copy.deepcopy(servers257.show_server_diagnostics)
-get_remote_consoles = copy.deepcopy(servers257.get_remote_consoles)
-list_tags = copy.deepcopy(servers257.list_tags)
-update_all_tags = copy.deepcopy(servers257.update_all_tags)
-delete_all_tags = copy.deepcopy(servers257.delete_all_tags)
-check_tag_existence = copy.deepcopy(servers257.check_tag_existence)
-update_tag = copy.deepcopy(servers257.update_tag)
-delete_tag = copy.deepcopy(servers257.delete_tag)
-attach_volume = copy.deepcopy(servers257.attach_volume)
-show_volume_attachment = copy.deepcopy(servers257.show_volume_attachment)
-list_volume_attachments = copy.deepcopy(servers257.list_volume_attachments)
+list_servers = copy.deepcopy(servers262.list_servers)
+show_server_diagnostics = copy.deepcopy(servers262.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers262.get_remote_consoles)
+list_tags = copy.deepcopy(servers262.list_tags)
+update_all_tags = copy.deepcopy(servers262.update_all_tags)
+delete_all_tags = copy.deepcopy(servers262.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers262.check_tag_existence)
+update_tag = copy.deepcopy(servers262.update_tag)
+delete_tag = copy.deepcopy(servers262.delete_tag)
+attach_volume = copy.deepcopy(servers262.attach_volume)
+show_volume_attachment = copy.deepcopy(servers262.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers262.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers262.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_70/servers.py b/tempest/lib/api_schema/response/compute/v2_70/servers.py
index 5ca4cc8..6103923 100644
--- a/tempest/lib/api_schema/response/compute/v2_70/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_70/servers.py
@@ -78,3 +78,4 @@
check_tag_existence = copy.deepcopy(servers263.check_tag_existence)
update_tag = copy.deepcopy(servers263.update_tag)
delete_tag = copy.deepcopy(servers263.delete_tag)
+show_instance_action = copy.deepcopy(servers263.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_71/servers.py b/tempest/lib/api_schema/response/compute/v2_71/servers.py
index f4c01ee..3e55c1c 100644
--- a/tempest/lib/api_schema/response/compute/v2_71/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_71/servers.py
@@ -82,3 +82,4 @@
attach_volume = copy.deepcopy(servers270.attach_volume)
show_volume_attachment = copy.deepcopy(servers270.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers270.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers270.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_73/servers.py b/tempest/lib/api_schema/response/compute/v2_73/servers.py
index ae7ebc4..e7a1d87 100644
--- a/tempest/lib/api_schema/response/compute/v2_73/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_73/servers.py
@@ -79,3 +79,4 @@
attach_volume = copy.deepcopy(servers271.attach_volume)
show_volume_attachment = copy.deepcopy(servers271.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers271.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers271.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_79/servers.py b/tempest/lib/api_schema/response/compute/v2_79/servers.py
index 58dcba8..b5507f9 100644
--- a/tempest/lib/api_schema/response/compute/v2_79/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_79/servers.py
@@ -65,3 +65,4 @@
check_tag_existence = copy.deepcopy(servers273.check_tag_existence)
update_tag = copy.deepcopy(servers273.update_tag)
delete_tag = copy.deepcopy(servers273.delete_tag)
+show_instance_action = copy.deepcopy(servers273.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_8/servers.py b/tempest/lib/api_schema/response/compute/v2_8/servers.py
index 3dbab3f..119d8e2 100644
--- a/tempest/lib/api_schema/response/compute/v2_8/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_8/servers.py
@@ -38,3 +38,4 @@
attach_volume = copy.deepcopy(servers.attach_volume)
show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/compute/v2_9/servers.py b/tempest/lib/api_schema/response/compute/v2_9/servers.py
index ee0313d..9258eec 100644
--- a/tempest/lib/api_schema/response/compute/v2_9/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_9/servers.py
@@ -57,3 +57,4 @@
attach_volume = copy.deepcopy(servers.attach_volume)
show_volume_attachment = copy.deepcopy(servers.show_volume_attachment)
list_volume_attachments = copy.deepcopy(servers.list_volume_attachments)
+show_instance_action = copy.deepcopy(servers.show_instance_action)
diff --git a/tempest/lib/api_schema/response/volume/volumes.py b/tempest/lib/api_schema/response/volume/volumes.py
index ffcf488..4f44526 100644
--- a/tempest/lib/api_schema/response/volume/volumes.py
+++ b/tempest/lib/api_schema/response/volume/volumes.py
@@ -21,7 +21,7 @@
'items': {
'type': 'object',
'properties': {
- 'server_id': {'type': 'string', 'format': 'uuid'},
+ 'server_id': {'type': ['string', 'null'], 'format': 'uuid'},
'attachment_id': {'type': 'string', 'format': 'uuid'},
'attached_at': parameter_types.date_time_or_null,
'host_name': {'type': ['string', 'null']},
diff --git a/tempest/lib/base.py b/tempest/lib/base.py
index 74ae77c..3be55c0 100644
--- a/tempest/lib/base.py
+++ b/tempest/lib/base.py
@@ -14,29 +14,11 @@
# under the License.
import os
-import sys
import fixtures
-import pkg_resources
import testtools
-def _handle_skip_exception():
- try:
- stestr_version = pkg_resources.parse_version(
- pkg_resources.get_distribution("stestr").version)
- stestr_min = pkg_resources.parse_version('2.5.0')
- new_stestr = (stestr_version >= stestr_min)
- import unittest
- import unittest2
- if sys.version_info >= (3, 5) and new_stestr:
- testtools.TestCase.skipException = unittest.case.SkipTest
- else:
- testtools.TestCase.skipException = unittest2.case.SkipTest
- except Exception:
- pass
-
-
class BaseTestCase(testtools.testcase.WithAttributes, testtools.TestCase):
setUpClassCalled = False
@@ -51,18 +33,6 @@
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
cls.setUpClassCalled = True
- # TODO(gmann): cls.handle_skip_exception is really workaround for
- # testtools bug- https://github.com/testing-cabal/testtools/issues/272
- # stestr which is used by Tempest internally to run the test switch
- # the customize test runner(which use stdlib unittest) for >=py3.5
- # else testtools.run.- https://github.com/mtreinish/stestr/pull/265
- # These two test runner are not compatible due to skip exception
- # handling(due to unittest2). testtools.run treat unittestt.SkipTest
- # as error and stdlib unittest treat unittest2.case.SkipTest raised
- # by testtools.TestCase.skipException.
- # The below workaround can be removed once testtools fix issue# 272.
- cls.orig_skip_exception = testtools.TestCase.skipException
- _handle_skip_exception()
@classmethod
def tearDownClass(cls):
@@ -70,7 +40,6 @@
super(BaseTestCase, cls).tearDownClass()
def setUp(self):
- testtools.TestCase.skipException = self.orig_skip_exception
super(BaseTestCase, self).setUp()
if not self.setUpClassCalled:
raise RuntimeError("setUpClass does not calls the super's "
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 0ae11ca..466222d 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -38,7 +38,7 @@
class SourcePatcher(object):
- """"Lazy patcher for python source files"""
+ """Lazy patcher for python source files"""
def __init__(self):
self.source_files = None
@@ -431,14 +431,21 @@
help='Package with tests')
parser.add_argument('--fix', action='store_true', dest='fix_tests',
help='Attempt to fix tests without UUIDs')
+ parser.add_argument('--libpath', action='store', dest='libpath',
+ default=".", type=str,
+ help='Path to package')
+
args = parser.parse_args()
- sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+ sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
+ sys.path.insert(0, args.libpath)
pkg = importlib.import_module(args.package)
+
checker = TestChecker(pkg)
errors = False
tests = checker.get_tests()
untagged = checker.find_untagged(tests)
errors = checker.report_collisions(tests) or errors
+
if args.fix_tests and untagged:
checker.fix_tests(untagged)
else:
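The new --libpath flag lets the checker import test packages that are not installed and do not live in the current working directory, for example a plugin checked out elsewhere. A hedged invocation sketch; the entry point and the package/path values are illustrative and depend on your install:

    # assuming the module's command-line entry point and a plugin at this path
    python -m tempest.lib.cmd.check_uuid --package my_plugin.tests \
        --libpath /opt/stack/my-plugin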
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 3f735f5..ef14dfc 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -907,8 +907,8 @@
if int(time.time()) - start_time >= self.build_timeout:
message = ('Failed to delete %(resource_type)s %(id)s within '
'the required time (%(timeout)s s). Timer started '
- 'at %(start_time)s. Timer ended at %(end_time)s'
- 'waited for %(wait_time)s' %
+ 'at %(start_time)s. Timer ended at %(end_time)s. '
+ 'Waited for %(wait_time)s s.' %
{'resource_type': self.resource_type, 'id': id,
'timeout': self.build_timeout,
'start_time': start_time,
diff --git a/tempest/lib/common/ssh.py b/tempest/lib/common/ssh.py
index ee15375..cb59a82 100644
--- a/tempest/lib/common/ssh.py
+++ b/tempest/lib/common/ssh.py
@@ -21,6 +21,7 @@
import warnings
from oslo_log import log as logging
+from oslo_utils.secretutils import md5
from tempest.lib import exceptions
@@ -33,11 +34,26 @@
LOG = logging.getLogger(__name__)
+def get_fingerprint(self):
+ """Patch paramiko
+
+ This method needs to be patched to allow paramiko to work under FIPS.
+ Until the patch to do this merges, patch paramiko here.
+
+ TODO(alee) Remove this when paramiko is patched.
+ See https://github.com/paramiko/paramiko/pull/1928
+ """
+ return md5(self.asbytes(), usedforsecurity=False).digest()
+
+
+paramiko.pkey.PKey.get_fingerprint = get_fingerprint
+
+
class Client(object):
def __init__(self, host, username, password=None, timeout=300, pkey=None,
channel_timeout=10, look_for_keys=False, key_filename=None,
- port=22, proxy_client=None):
+ port=22, proxy_client=None, ssh_key_type='rsa'):
"""SSH client.
Many of the parameters are just passed to the underlying implementation
@@ -59,6 +75,7 @@
:param proxy_client: Another SSH client to provide a transport
for ssh-over-ssh. The default is None, which means
not to use ssh-over-ssh.
+ :param ssh_key_type: ssh key type (rsa, ecdsa)
:type proxy_client: ``tempest.lib.common.ssh.Client`` object
"""
self.host = host
@@ -66,8 +83,15 @@
self.port = port
self.password = password
if isinstance(pkey, str):
- pkey = paramiko.RSAKey.from_private_key(
- io.StringIO(str(pkey)))
+ if ssh_key_type == 'rsa':
+ pkey = paramiko.RSAKey.from_private_key(
+ io.StringIO(str(pkey)))
+ elif ssh_key_type == 'ecdsa':
+ pkey = paramiko.ECDSAKey.from_private_key(
+ io.StringIO(str(pkey)))
+ else:
+ raise exceptions.SSHClientUnsupportedKeyType(
+ key_type=ssh_key_type)
self.pkey = pkey
self.look_for_keys = look_for_keys
self.key_filename = key_filename
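With ssh_key_type the client can now consume ECDSA private keys in PEM form; anything other than 'rsa' or 'ecdsa' raises SSHClientUnsupportedKeyType. A minimal sketch, assuming the cryptography package is available (it is pulled in via paramiko) and using a throwaway key with placeholder host/user values:

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import ec

    from tempest.lib.common import ssh

    # Generate a throwaway ECDSA key; in real runs the PEM usually comes from
    # the keypairs client or from test configuration.
    key = ec.generate_private_key(ec.SECP384R1(), default_backend())
    pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption()).decode('utf-8')

    # 'ecdsa' makes the constructor parse the key with paramiko.ECDSAKey.
    client = ssh.Client('192.0.2.10', 'cirros', pkey=pem,
                        ssh_key_type='ecdsa')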
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index d84dd28..224f3bf 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -69,7 +69,7 @@
server=None, servers_client=None, ssh_timeout=300,
connect_timeout=60, console_output_enabled=True,
ssh_shell_prologue="set -eu -o pipefail; PATH=$PATH:/sbin;",
- ping_count=1, ping_size=56):
+ ping_count=1, ping_size=56, ssh_key_type='rsa'):
"""Executes commands in a VM over ssh
:param ip_address: IP address to ssh to
@@ -84,6 +84,7 @@
:param ssh_shell_prologue: Shell fragments to use before command
:param ping_count: Number of ping packets
:param ping_size: Packet size for ping packets
+ :param ssh_key_type: ssh key type (rsa, ecdsa)
"""
self.server = server
self.servers_client = servers_client
@@ -92,10 +93,12 @@
self.ssh_shell_prologue = ssh_shell_prologue
self.ping_count = ping_count
self.ping_size = ping_size
+ self.ssh_key_type = ssh_key_type
self.ssh_client = ssh.Client(ip_address, username, password,
ssh_timeout, pkey=pkey,
- channel_timeout=connect_timeout)
+ channel_timeout=connect_timeout,
+ ssh_key_type=ssh_key_type)
@debug_ssh
def exec_command(self, cmd):
diff --git a/tempest/lib/exceptions.py b/tempest/lib/exceptions.py
index abe68d2..dd7885e 100644
--- a/tempest/lib/exceptions.py
+++ b/tempest/lib/exceptions.py
@@ -256,6 +256,10 @@
"%(port)s and username: %(username)s as parent")
+class SSHClientUnsupportedKeyType(TempestException):
+ message = ("SSH client: unsupported key type %(key_type)s")
+
+
class UnknownServiceClient(TempestException):
message = "Service clients named %(services)s are not known"
diff --git a/tempest/lib/services/compute/hypervisor_client.py b/tempest/lib/services/compute/hypervisor_client.py
index 1cbfcc3..e237845 100644
--- a/tempest/lib/services/compute/hypervisor_client.py
+++ b/tempest/lib/services/compute/hypervisor_client.py
@@ -13,12 +13,18 @@
# License for the specific language governing permissions and limitations
# under the License.
+from urllib import parse as urllib
+
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 \
import hypervisors as schemav21
from tempest.lib.api_schema.response.compute.v2_28 \
import hypervisors as schemav228
+from tempest.lib.api_schema.response.compute.v2_33 \
+ import hypervisors as schemav233
+from tempest.lib.api_schema.response.compute.v2_53 \
+ import hypervisors as schemav253
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
@@ -27,9 +33,11 @@
schema_versions_info = [
{'min': None, 'max': '2.27', 'schema': schemav21},
- {'min': '2.28', 'max': None, 'schema': schemav228}]
+ {'min': '2.28', 'max': '2.32', 'schema': schemav228},
+ {'min': '2.33', 'max': '2.52', 'schema': schemav233},
+ {'min': '2.53', 'max': None, 'schema': schemav253}]
- def list_hypervisors(self, detail=False):
+ def list_hypervisors(self, detail=False, **kwargs):
"""List hypervisors information."""
url = 'os-hypervisors'
schema = self.get_schema(self.schema_versions_info)
@@ -37,14 +45,19 @@
if detail:
url += '/detail'
_schema = schema.list_hypervisors_detail
+ if kwargs:
+ url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(_schema, resp, body)
return rest_client.ResponseBody(resp, body)
- def show_hypervisor(self, hypervisor_id):
+ def show_hypervisor(self, hypervisor_id, **kwargs):
"""Display the details of the specified hypervisor."""
+ url = 'os-hypervisors/%s' % hypervisor_id
+ if kwargs:
+ url += '?%s' % urllib.urlencode(kwargs)
- resp, body = self.get('os-hypervisors/%s' % hypervisor_id)
+ resp, body = self.get(url)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
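Because the extra kwargs are simply urlencoded onto the URL, callers can pass whatever query parameters the negotiated microversion supports; with_servers is Nova's 2.53+ flag for embedding the instance list. A hedged usage sketch; the client variable name is illustrative and the request microversion must be at least 2.53:

    hypervisors = admin_hypervisor_client.list_hypervisors(
        detail=True, with_servers=True)['hypervisors']

    # Show a single hypervisor, again with its servers embedded.
    details = admin_hypervisor_client.show_hypervisor(
        hypervisors[0]['id'], with_servers=True)['hypervisor']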
diff --git a/tempest/lib/services/compute/keypairs_client.py b/tempest/lib/services/compute/keypairs_client.py
index 9d7b7fc..51a4583 100644
--- a/tempest/lib/services/compute/keypairs_client.py
+++ b/tempest/lib/services/compute/keypairs_client.py
@@ -15,6 +15,10 @@
from urllib import parse as urllib
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import ec
+from cryptography.hazmat.primitives import serialization
+
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import keypairs as schemav21
@@ -28,6 +32,12 @@
schema_versions_info = [{'min': None, 'max': '2.1', 'schema': schemav21},
{'min': '2.2', 'max': None, 'schema': schemav22}]
+ def __init__(self, auth_provider, service, region,
+ ssh_key_type='rsa', **kwargs):
+ super(KeyPairsClient, self).__init__(
+ auth_provider, service, region, **kwargs)
+ self.ssh_key_type = ssh_key_type
+
def list_keypairs(self, **params):
"""Lists keypairs that are associated with the account.
@@ -67,12 +77,30 @@
API reference:
https://docs.openstack.org/api-ref/compute/#create-or-import-keypair
"""
+ pkey = None
+ if (self.ssh_key_type == 'ecdsa' and 'public_key' not in kwargs and
+ ('type' not in kwargs or kwargs['type'] == 'ssh')):
+ # create a ecdsa key and pass the public key into the request
+ pkey = ec.generate_private_key(ec.SECP384R1(), default_backend())
+ pubkey = pkey.public_key().public_bytes(
+ encoding=serialization.Encoding.OpenSSH,
+ format=serialization.PublicFormat.OpenSSH)
+ kwargs['public_key'] = pubkey
+
post_body = json.dumps({'keypair': kwargs})
resp, body = self.post("os-keypairs", body=post_body)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.create_keypair, resp, body)
- return rest_client.ResponseBody(resp, body)
+ resp_body = rest_client.ResponseBody(resp, body)
+ if pkey:
+ # add the privkey to the response as it was generated here
+ privkey = pkey.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.NoEncryption())
+ resp_body['keypair']['private_key'] = privkey.decode('utf-8')
+ return resp_body
def delete_keypair(self, keypair_name, **params):
"""Deletes a keypair.
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index e58890c..ed3d4c0 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -29,9 +29,12 @@
from tempest.lib.api_schema.response.compute.v2_3 import servers as schemav23
from tempest.lib.api_schema.response.compute.v2_47 import servers as schemav247
from tempest.lib.api_schema.response.compute.v2_48 import servers as schemav248
+from tempest.lib.api_schema.response.compute.v2_51 import servers as schemav251
from tempest.lib.api_schema.response.compute.v2_54 import servers as schemav254
from tempest.lib.api_schema.response.compute.v2_57 import servers as schemav257
+from tempest.lib.api_schema.response.compute.v2_58 import servers as schemav258
from tempest.lib.api_schema.response.compute.v2_6 import servers as schemav26
+from tempest.lib.api_schema.response.compute.v2_62 import servers as schemav262
from tempest.lib.api_schema.response.compute.v2_63 import servers as schemav263
from tempest.lib.api_schema.response.compute.v2_70 import servers as schemav270
from tempest.lib.api_schema.response.compute.v2_71 import servers as schemav271
@@ -56,9 +59,12 @@
{'min': '2.19', 'max': '2.25', 'schema': schemav219},
{'min': '2.26', 'max': '2.46', 'schema': schemav226},
{'min': '2.47', 'max': '2.47', 'schema': schemav247},
- {'min': '2.48', 'max': '2.53', 'schema': schemav248},
+ {'min': '2.48', 'max': '2.50', 'schema': schemav248},
+ {'min': '2.51', 'max': '2.53', 'schema': schemav251},
{'min': '2.54', 'max': '2.56', 'schema': schemav254},
- {'min': '2.57', 'max': '2.62', 'schema': schemav257},
+ {'min': '2.57', 'max': '2.57', 'schema': schemav257},
+ {'min': '2.58', 'max': '2.61', 'schema': schemav258},
+ {'min': '2.62', 'max': '2.62', 'schema': schemav262},
{'min': '2.63', 'max': '2.69', 'schema': schemav263},
{'min': '2.70', 'max': '2.70', 'schema': schemav270},
{'min': '2.71', 'max': '2.72', 'schema': schemav271},
@@ -715,6 +721,7 @@
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(server_id, request_id))
body = json.loads(body)
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.show_instance_action, resp, body)
return rest_client.ResponseBody(resp, body)
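Splitting the 2.48-2.62 range into finer buckets works because get_schema() only has to find the range that contains the negotiated request microversion and return that module. A simplified, self-contained sketch of that kind of range lookup (not tempest's actual implementation):

    def _to_tuple(version):
        # '2.57' -> (2, 57); None marks an open end of the range.
        return tuple(int(p) for p in version.split('.')) if version else None

    def select_schema(schema_versions_info, requested):
        req = _to_tuple(requested)
        for entry in schema_versions_info:
            low = _to_tuple(entry['min']) or (0, 0)
            high = _to_tuple(entry['max']) or (float('inf'), float('inf'))
            if low <= req <= high:
                return entry['schema']
        raise ValueError('no schema registered for %s' % requested)

    # With the table above, a request at 2.59 lands in the 2.58-2.61 bucket,
    # so show_instance_action is validated against the v2_58 module.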
diff --git a/tempest/lib/services/network/__init__.py b/tempest/lib/services/network/__init__.py
index a0e6313..faf35d1 100644
--- a/tempest/lib/services/network/__init__.py
+++ b/tempest/lib/services/network/__init__.py
@@ -15,6 +15,8 @@
from tempest.lib.services.network.agents_client import AgentsClient
from tempest.lib.services.network.extensions_client import ExtensionsClient
from tempest.lib.services.network.floating_ips_client import FloatingIPsClient
+from tempest.lib.services.network.floating_ips_port_forwarding_client import \
+ FloatingIpsPortForwardingClient
from tempest.lib.services.network.log_resource_client import LogResourceClient
from tempest.lib.services.network.loggable_resource_client import \
LoggableResourceClient
@@ -29,6 +31,8 @@
QosLimitBandwidthRulesClient
from tempest.lib.services.network.qos_minimum_bandwidth_rules_client import \
QosMinimumBandwidthRulesClient
+from tempest.lib.services.network.qos_minimum_packet_rate_rules_client import \
+ QosMinimumPacketRateRulesClient
from tempest.lib.services.network.quotas_client import QuotasClient
from tempest.lib.services.network.routers_client import RoutersClient
from tempest.lib.services.network.security_group_rules_client import \
@@ -45,11 +49,11 @@
from tempest.lib.services.network.versions_client import NetworkVersionsClient
__all__ = ['AgentsClient', 'ExtensionsClient', 'FloatingIPsClient',
- 'MeteringLabelRulesClient', 'MeteringLabelsClient',
- 'NetworksClient', 'NetworkVersionsClient', 'PortsClient',
- 'QosClient', 'QosMinimumBandwidthRulesClient',
+ 'FloatingIpsPortForwardingClient', 'MeteringLabelRulesClient',
+ 'MeteringLabelsClient', 'NetworksClient', 'NetworkVersionsClient',
+ 'PortsClient', 'QosClient', 'QosMinimumBandwidthRulesClient',
'QosLimitBandwidthRulesClient', 'QuotasClient', 'RoutersClient',
'SecurityGroupRulesClient', 'SecurityGroupsClient',
'SegmentsClient', 'ServiceProvidersClient', 'SubnetpoolsClient',
'SubnetsClient', 'TagsClient', 'TrunksClient', 'LogResourceClient',
- 'LoggableResourceClient']
+ 'LoggableResourceClient', 'QosMinimumPacketRateRulesClient']
diff --git a/tempest/lib/services/network/floating_ips_port_forwarding_client.py b/tempest/lib/services/network/floating_ips_port_forwarding_client.py
new file mode 100644
index 0000000..43e24ea
--- /dev/null
+++ b/tempest/lib/services/network/floating_ips_port_forwarding_client.py
@@ -0,0 +1,78 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class FloatingIpsPortForwardingClient(base.BaseNetworkClient):
+
+ def create_port_forwarding(self, floatingip_id, **kwargs):
+ """Creates a floating IP port forwarding.
+
+ Creates port forwarding by using the configuration that you define in
+ the request object.
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#create-port-forwarding
+ """
+ uri = '/floatingips/%s/port_forwardings' % floatingip_id
+ post_data = {'port_forwarding': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_port_forwarding(
+ self, floatingip_id, port_forwarding_id, **kwargs):
+ """Updates a floating IP port_forwarding resource.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#update-a-port-forwarding
+ """
+ uri = '/floatingips/%s/port_forwardings/%s' % (
+ floatingip_id, port_forwarding_id)
+ post_data = {'port_forwarding': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_port_forwarding(
+ self, floatingip_id, port_forwarding_id, **fields):
+ """Shows details for a floating IP port forwarding id.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#show-port-forwarding
+ """
+ uri = '/floatingips/%s/port_forwardings/%s' % (
+ floatingip_id, port_forwarding_id)
+ return self.show_resource(uri, **fields)
+
+ def delete_port_forwarding(self, floatingip_id, port_forwarding_id):
+ """Deletes a floating IP port_forwarding resource.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#delete-a-floating-ip-port-forwarding
+ """
+ uri = '/floatingips/%s/port_forwardings/%s' % (
+ floatingip_id, port_forwarding_id)
+ return self.delete_resource(uri)
+
+ def list_port_forwardings(self, floatingip_id, **filters):
+ """Lists floating Ip port forwardings.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/network/v2/index.html#list-floating-ip-port-forwardings-detail
+ """
+ uri = '/floatingips/%s/port_forwardings' % floatingip_id
+ return self.list_resources(uri, **filters)
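A hedged usage sketch of the new client. The attribute name on the test's client manager, the floating IP/port variables, and the rule attributes (protocol, internal_ip_address, internal_port_id, internal_port, external_port) follow the Neutron API reference and are assumed to exist in your deployment:

    pf_client = self.os_admin.floating_ips_port_forwarding_client

    # Forward TCP port 2222 of the floating IP to port 22 of an internal port.
    pf = pf_client.create_port_forwarding(
        floatingip_id,
        protocol='tcp',
        internal_port_id=internal_port_id,
        internal_ip_address=internal_ip_address,
        internal_port=22,
        external_port=2222)['port_forwarding']

    pf_client.show_port_forwarding(floatingip_id, pf['id'])
    pf_client.list_port_forwardings(floatingip_id, protocol='tcp')
    pf_client.update_port_forwarding(floatingip_id, pf['id'],
                                     external_port=2223)
    pf_client.delete_port_forwarding(floatingip_id, pf['id'])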
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 39021d5..7aa96b2 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -30,7 +30,6 @@
from tempest.common import waiters
from tempest import config
from tempest import exceptions
-from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
@@ -85,12 +84,10 @@
cls.placement_min_microversion,
CONF.placement.min_microversion))
- def setUp(self):
- super(ScenarioTest, self).setUp()
- self.useFixture(api_microversion_fixture.APIMicroversionFixture(
- compute_microversion=self.compute_request_microversion,
- volume_microversion=self.volume_request_microversion,
- placement_microversion=self.placement_request_microversion))
+ cls.setup_api_microversion_fixture(
+ compute_microversion=cls.compute_request_microversion,
+ volume_microversion=cls.volume_request_microversion,
+ placement_microversion=cls.placement_request_microversion)
def setup_compute_client(cls):
"""Compute client"""
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2c981c8..5aac19c 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -96,13 +96,6 @@
'%s' % (secgroup['id'], server['id']))
raise exceptions.TimeoutException(msg)
- def _get_floating_ip_in_server_addresses(self, floating_ip, server):
- for addresses in server['addresses'].values():
- for address in addresses:
- if (address['OS-EXT-IPS:type'] == 'floating' and
- address['addr'] == floating_ip['floating_ip_address']):
- return address
-
@decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
@utils.services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
@@ -132,15 +125,8 @@
fip = self.create_floating_ip(server)
floating_ip = self.associate_floating_ip(
fip, server)
- # fetch the server again to make sure the addresses were refreshed
- # after associating the floating IP
- server = self.servers_client.show_server(server['id'])['server']
- address = self._get_floating_ip_in_server_addresses(
- floating_ip, server)
- self.assertIsNotNone(
- address,
- "Failed to find floating IP '%s' in server addresses: %s" %
- (floating_ip['floating_ip_address'], server['addresses']))
+ waiters.wait_for_server_floating_ip(self.servers_client,
+ server, floating_ip)
ssh_ip = floating_ip['floating_ip_address']
else:
ssh_ip = self.get_server_ip(server)
@@ -165,19 +151,6 @@
if floating_ip:
# delete the floating IP, this should refresh the server addresses
self.disassociate_floating_ip(floating_ip)
-
- def is_floating_ip_detached_from_server():
- server_info = self.servers_client.show_server(
- server['id'])['server']
- address = self._get_floating_ip_in_server_addresses(
- floating_ip, server_info)
- return (not address)
-
- if not test_utils.call_until_true(
- is_floating_ip_detached_from_server,
- CONF.compute.build_timeout,
- CONF.compute.build_interval):
- msg = ("Floating IP '%s' should not be in server addresses: %s"
- % (floating_ip['floating_ip_address'],
- server['addresses']))
- raise exceptions.TimeoutException(msg)
+ waiters.wait_for_server_floating_ip(
+ self.servers_client, server, floating_ip,
+ wait_for_disassociate=True)
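wait_for_server_floating_ip centralises the polling this test used to hand-roll. A simplified sketch of the pattern it replaces (not the waiter's actual code): re-fetch the server until its address list reflects the association or disassociation, bounded by the build timeout:

    import time

    def wait_for_floating_ip(servers_client, server_id, fip_address,
                             timeout, interval, expect_present=True):
        start = time.time()
        while time.time() - start < timeout:
            addresses = servers_client.show_server(
                server_id)['server']['addresses']
            found = any(
                addr['OS-EXT-IPS:type'] == 'floating' and
                addr['addr'] == fip_address
                for addrs in addresses.values() for addr in addrs)
            if found == expect_present:
                return
            time.sleep(interval)
        raise TimeoutError('floating IP %s state never settled' % fip_address)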
diff --git a/tempest/scenario/test_network_qos_placement.py b/tempest/scenario/test_network_qos_placement.py
index db4751b..365eb1b 100644
--- a/tempest/scenario/test_network_qos_placement.py
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -49,7 +49,10 @@
compute_max_microversion = 'latest'
INGRESS_DIRECTION = 'ingress'
- BW_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
+ EGRESS_DIRECTION = 'egress'
+ ANY_DIRECTION = 'any'
+ INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
+ EGRESS_RESOURCE_CLASS = "NET_BW_EGR_KILOBIT_PER_SEC"
# For any realistic inventory value (that is inventory != MAX_INT) an
# allocation candidate request of MAX_INT is expected to be rejected, see:
@@ -76,7 +79,7 @@
new_flavor = self.flavors_client.create_flavor(**{
'ram': old_flavor['ram'],
'vcpus': old_flavor['vcpus'],
- 'name': old_flavor['name'] + 'extra',
+ 'name': old_flavor['name'] + 'extra-%s' % data_utils.rand_int_id(),
'disk': old_flavor['disk'] + 1
})['flavor']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
@@ -106,7 +109,9 @@
super(MinBwAllocationPlacementTest, self).setUp()
self._check_if_allocation_is_possible()
- def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
+ def _create_policy_and_min_bw_rule(
+ self, name_prefix, min_kbps, direction="ingress"
+ ):
policy = self.qos_client.create_qos_policy(
name=data_utils.rand_name(name_prefix),
shared=True)['policy']
@@ -116,7 +121,7 @@
policy['id'],
**{
'min_kbps': min_kbps,
- 'direction': self.INGRESS_DIRECTION
+ 'direction': direction,
})['minimum_bandwidth_rule']
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
@@ -166,20 +171,20 @@
def _check_if_allocation_is_possible(self):
alloc_candidates = self.placement_client.list_allocation_candidates(
- resources1='%s:%s' % (self.BW_RESOURCE_CLASS,
+ resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
self.SMALLEST_POSSIBLE_BW))
if len(alloc_candidates['provider_summaries']) == 0:
self.fail('No allocation candidates are available for %s:%s' %
- (self.BW_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
+ (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
# Just to be sure check with impossible high (placement max_int),
# allocation
alloc_candidates = self.placement_client.list_allocation_candidates(
- resources1='%s:%s' % (self.BW_RESOURCE_CLASS,
+ resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
self.PLACEMENT_MAX_INT))
if len(alloc_candidates['provider_summaries']) != 0:
self.fail('For %s:%s there should be no available candidate!' %
- (self.BW_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
+ (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'):
wait_until = (None if status == 'ERROR' else status)
@@ -193,22 +198,28 @@
status=status, ready_wait=False, raise_on_error=False)
return server, port
- def _assert_allocation_is_as_expected(self, consumer, port_ids,
- min_kbps=SMALLEST_POSSIBLE_BW):
+ def _assert_allocation_is_as_expected(
+ self, consumer, port_ids, min_kbps=SMALLEST_POSSIBLE_BW,
+ expected_rc=NetworkQoSPlacementTestBase.INGRESS_RESOURCE_CLASS,
+ ):
allocations = self.placement_client.list_allocations(
consumer)['allocations']
self.assertGreater(len(allocations), 0)
bw_resource_in_alloc = False
allocation_rp = None
for rp, resources in allocations.items():
- if self.BW_RESOURCE_CLASS in resources['resources']:
+ if expected_rc in resources['resources']:
self.assertEqual(
min_kbps,
- resources['resources'][self.BW_RESOURCE_CLASS])
+ resources['resources'][expected_rc])
bw_resource_in_alloc = True
allocation_rp = rp
if min_kbps:
- self.assertTrue(bw_resource_in_alloc)
+ self.assertTrue(
+ bw_resource_in_alloc,
+ f"expected {min_kbps} bandwidth allocation from {expected_rc} "
+ f"but instance has allocation {allocations} instead."
+ )
# Check binding_profile of the port is not empty and equals with
# the rp uuid
@@ -508,3 +519,554 @@
**{'description': 'foo'})
self._assert_allocation_is_as_expected(server1['id'], [port['id']],
self.BANDWIDTH_1)
+
+ @decorators.idempotent_id('372b2728-cfed-469a-b5f6-b75779e1ccbe')
+ @utils.services('compute', 'network')
+ def test_qos_min_bw_allocation_update_policy_direction_change(self):
+ """Test QoS min bw direction change on a bound port
+
+ Related RFE in neutron: #1882804
+ The scenario is the following:
+ * Have a port with QoS policy and minimum bandwidth rule with ingress
+ direction
+ * Boot a VM with the port.
+ * Update the port with a new policy to egress direction in
+ minimum bandwidth rule.
+ * The allocation on placement side should be according to the new
+ rules.
+ """
+ if not utils.is_network_feature_enabled('update_port_qos'):
+ raise self.skipException("update_port_qos feature is not enabled")
+
+ def create_policies():
+ self.qos_policy_ingress = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_ingress',
+ min_kbps=self.BANDWIDTH_1,
+ direction=self.INGRESS_DIRECTION,
+ )
+ self.qos_policy_egress = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_egress',
+ min_kbps=self.BANDWIDTH_1,
+ direction=self.EGRESS_DIRECTION,
+ )
+
+ self._create_network_and_qos_policies(create_policies)
+
+ port = self.create_port(
+ self.prov_network['id'],
+ qos_policy_id=self.qos_policy_ingress['id'])
+
+ server1 = self.create_server(
+ networks=[{'port': port['id']}])
+
+ self._assert_allocation_is_as_expected(
+ server1['id'], [port['id']], self.BANDWIDTH_1,
+ expected_rc=self.INGRESS_RESOURCE_CLASS)
+
+ self.ports_client.update_port(
+ port['id'],
+ qos_policy_id=self.qos_policy_egress['id'])
+
+ self._assert_allocation_is_as_expected(
+ server1['id'], [port['id']], self.BANDWIDTH_1,
+ expected_rc=self.EGRESS_RESOURCE_CLASS)
+ self._assert_allocation_is_as_expected(
+ server1['id'], [port['id']], 0,
+ expected_rc=self.INGRESS_RESOURCE_CLASS)
+
+
+class QoSBandwidthAndPacketRateTests(NetworkQoSPlacementTestBase):
+
+ PPS_RESOURCE_CLASS = "NET_PACKET_RATE_KILOPACKET_PER_SEC"
+
+ @classmethod
+ def skip_checks(cls):
+ super().skip_checks()
+ if not CONF.network_feature_enabled.qos_min_bw_and_pps:
+ msg = (
+ "Skipped as no resource inventories are configured for QoS "
+ "minimum bandwidth and packet rate testing.")
+ raise cls.skipException(msg)
+
+ @classmethod
+ def setup_clients(cls):
+ super().setup_clients()
+ cls.qos_min_pps_client = cls.os_admin.qos_min_pps_client
+
+ def setUp(self):
+ super().setUp()
+ self.network = self._create_network()
+
+ def _create_qos_policy_with_bw_and_pps_rules(self, min_kbps, min_kpps):
+ policy = self.qos_client.create_qos_policy(
+ name=data_utils.rand_name(),
+ shared=True
+ )['policy']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.qos_client.delete_qos_policy,
+ policy['id']
+ )
+
+ if min_kbps > 0:
+ bw_rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
+ policy['id'],
+ min_kbps=min_kbps,
+ direction=self.INGRESS_DIRECTION
+ )['minimum_bandwidth_rule']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.qos_min_bw_client.delete_minimum_bandwidth_rule,
+ policy['id'],
+ bw_rule['id']
+ )
+
+ if min_kpps > 0:
+ pps_rule = self.qos_min_pps_client.create_minimum_packet_rate_rule(
+ policy['id'],
+ min_kpps=min_kpps,
+ direction=self.ANY_DIRECTION
+ )['minimum_packet_rate_rule']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.qos_min_pps_client.delete_minimum_packet_rate_rule,
+ policy['id'],
+ pps_rule['id']
+ )
+
+ return policy
+
+ def _create_network(self):
+ physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+ base_segm = (
+ CONF.network_feature_enabled.provider_net_base_segmentation_id)
+
+ # setup_network_subnet_with_router will add the necessary cleanup calls
+ network, _, _ = self.setup_network_subnet_with_router(
+ networks_client=self.networks_client,
+ routers_client=self.routers_client,
+ subnets_client=self.subnets_client,
+ shared=True,
+ **{
+ 'provider:network_type': 'vlan',
+ 'provider:physical_network': physnet_name,
+ # +1 to be different from the segmentation_id used in
+ # MinBwAllocationPlacementTest
+ 'provider:segmentation_id': int(base_segm) + 1,
+ }
+ )
+ return network
+
+ def _create_port_with_qos_policy(self, policy):
+ port = self.ports_client.create_port(
+ name=data_utils.rand_name(self.__class__.__name__),
+ network_id=self.network['id'],
+ qos_policy_id=policy['id'] if policy else None,
+ )['port']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.ports_client.delete_port, port['id']
+ )
+ return port
+
+ def assert_allocations(
+ self, server, port, expected_min_kbps, expected_min_kpps
+ ):
+ allocations = self.placement_client.list_allocations(
+ server['id'])['allocations']
+
+ # one allocation for the flavor related resources on the compute RP
+ expected_allocation = 1
+ # one allocation due to bw rule
+ if expected_min_kbps > 0:
+ expected_allocation += 1
+ # one allocation due to pps rule
+ if expected_min_kpps > 0:
+ expected_allocation += 1
+ self.assertEqual(expected_allocation, len(allocations), allocations)
+
+ expected_rp_uuids_in_binding_allocation = set()
+
+ if expected_min_kbps > 0:
+ bw_rp_allocs = {
+ rp: alloc['resources'][self.INGRESS_RESOURCE_CLASS]
+ for rp, alloc in allocations.items()
+ if self.INGRESS_RESOURCE_CLASS in alloc['resources']
+ }
+ self.assertEqual(1, len(bw_rp_allocs))
+ bw_rp, bw_alloc = list(bw_rp_allocs.items())[0]
+ self.assertEqual(expected_min_kbps, bw_alloc)
+ expected_rp_uuids_in_binding_allocation.add(bw_rp)
+
+ if expected_min_kpps > 0:
+ pps_rp_allocs = {
+ rp: alloc['resources'][self.PPS_RESOURCE_CLASS]
+ for rp, alloc in allocations.items()
+ if self.PPS_RESOURCE_CLASS in alloc['resources']
+ }
+ self.assertEqual(1, len(pps_rp_allocs))
+ pps_rp, pps_alloc = list(pps_rp_allocs.items())[0]
+ self.assertEqual(expected_min_kpps, pps_alloc)
+ expected_rp_uuids_in_binding_allocation.add(pps_rp)
+
+ # Check that port['binding:profile']['allocation'] points to the
+ # resource providers the bandwidth and packet rate were allocated from
+ port = self.os_admin.ports_client.show_port(port['id'])
+ port_binding_alloc = port[
+ 'port']['binding:profile'].get('allocation', {})
+ self.assertEqual(
+ expected_rp_uuids_in_binding_allocation,
+ set(port_binding_alloc.values())
+ )
+
+ def assert_no_allocation(self, server, port):
+ # check that there are no allocations
+ allocations = self.placement_client.list_allocations(
+ server['id'])['allocations']
+ self.assertEqual(0, len(allocations))
+
+ # check that binding_profile of the port is empty
+ port = self.os_admin.ports_client.show_port(port['id'])
+ self.assertEqual(0, len(port['port']['binding:profile']))
+
+ @decorators.idempotent_id('93d1a88d-235e-4b7b-b44d-2a17dcf4e213')
+ @utils.services('compute', 'network')
+ def test_server_create_delete(self):
+ min_kbps = 1000
+ min_kpps = 100
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+ port = self._create_port_with_qos_policy(policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ self.servers_client.delete_server(server['id'])
+ waiters.wait_for_server_termination(self.servers_client, server['id'])
+
+ self.assert_no_allocation(server, port)
+
+ def _test_create_server_negative(self, min_kbps=1000, min_kpps=100):
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+ port = self._create_port_with_qos_policy(policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until=None)
+ waiters.wait_for_server_status(
+ client=self.servers_client, server_id=server['id'],
+ status='ERROR', ready_wait=False, raise_on_error=False)
+
+ # check that the creation failed with No valid host
+ server = self.servers_client.show_server(server['id'])['server']
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+
+ self.assert_no_allocation(server, port)
+
+ @decorators.idempotent_id('915dd2ce-4890-40c8-9db6-f3e04080c6c1')
+ @utils.services('compute', 'network')
+ def test_server_create_no_valid_host_due_to_bandwidth(self):
+ self._test_create_server_negative(min_kbps=self.PLACEMENT_MAX_INT)
+
+ @decorators.idempotent_id('2d4a755e-10b9-4ac0-bef2-3f89de1f150b')
+ @utils.services('compute', 'network')
+ def test_server_create_no_valid_host_due_to_packet_rate(self):
+ self._test_create_server_negative(min_kpps=self.PLACEMENT_MAX_INT)
+
+ @decorators.idempotent_id('69d93e4f-0dfc-4d17-8d84-cc5c3c842cd5')
+ @testtools.skipUnless(
+ CONF.compute_feature_enabled.resize, 'Resize not available.')
+ @utils.services('compute', 'network')
+ def test_server_resize(self):
+ min_kbps = 1000
+ min_kpps = 100
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+ port = self._create_port_with_qos_policy(policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ new_flavor = self._create_flavor_to_resize_to()
+
+ self.servers_client.resize_server(
+ server_id=server['id'], flavor_ref=new_flavor['id']
+ )
+ waiters.wait_for_server_status(
+ client=self.servers_client, server_id=server['id'],
+ status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ self.servers_client.confirm_resize_server(server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.servers_client, server_id=server['id'],
+ status='ACTIVE', ready_wait=False, raise_on_error=True)
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ @decorators.idempotent_id('d01d4aee-ca06-4e4e-add7-8a47fe0daf96')
+ @testtools.skipUnless(
+ CONF.compute_feature_enabled.resize, 'Resize not available.')
+ @utils.services('compute', 'network')
+ def test_server_resize_revert(self):
+ min_kbps = 1000
+ min_kpps = 100
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+ port = self._create_port_with_qos_policy(policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ new_flavor = self._create_flavor_to_resize_to()
+
+ self.servers_client.resize_server(
+ server_id=server['id'], flavor_ref=new_flavor['id']
+ )
+ waiters.wait_for_server_status(
+ client=self.servers_client, server_id=server['id'],
+ status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ self.servers_client.revert_resize_server(server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.servers_client, server_id=server['id'],
+ status='ACTIVE', ready_wait=False, raise_on_error=True)
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ @decorators.idempotent_id('bdd0b31c-c8b0-4b7b-b80a-545a46b32abe')
+ @testtools.skipUnless(
+ CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration is not available.')
+ @testtools.skipUnless(
+ CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode tests.')
+ @utils.services('compute', 'network')
+ def test_server_migrate(self):
+ min_kbps = 1000
+ min_kpps = 100
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+ port = self._create_port_with_qos_policy(policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ self.os_adm.servers_client.migrate_server(server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.servers_client, server_id=server['id'],
+ status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False)
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ self.os_adm.servers_client.confirm_resize_server(
+ server_id=server['id'])
+ waiters.wait_for_server_status(
+ client=self.servers_client, server_id=server['id'],
+ status='ACTIVE', ready_wait=False, raise_on_error=True)
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ @decorators.idempotent_id('fdb260e3-caa5-482d-ac7c-8c22adf3d750')
+ @utils.services('compute', 'network')
+ def test_qos_policy_update_on_bound_port(self):
+ min_kbps = 1000
+ min_kpps = 100
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+
+ min_kbps2 = 2000
+ min_kpps2 = 50
+ policy2 = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps2, min_kpps2)
+
+ port = self._create_port_with_qos_policy(policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ self.ports_client.update_port(
+ port['id'],
+ qos_policy_id=policy2['id'])
+
+ self.assert_allocations(server, port, min_kbps2, min_kpps2)
+
+ @decorators.idempotent_id('e6a20125-a02e-49f5-bcf6-894305ee3715')
+ @utils.services('compute', 'network')
+ def test_qos_policy_update_on_bound_port_from_null_policy(self):
+ min_kbps = 1000
+ min_kpps = 100
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+
+ port = self._create_port_with_qos_policy(policy=None)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, 0, 0)
+
+ self.ports_client.update_port(
+ port['id'],
+ qos_policy_id=policy['id'])
+
+        # NOTE(gibi): This is unintuitive but it is the expected behavior.
+        # If there was no policy attached to the port when the server was
+        # created then neutron still allows adding a policy to the port later
+        # as this operation was supported before placement enforcement was
+        # added for the qos minimum bandwidth rule. However, neutron cannot
+        # create the placement resource allocation for this port.
+ self.assert_allocations(server, port, 0, 0)
+
+ @decorators.idempotent_id('f5864761-966c-4e49-b430-ac0044b7d658')
+ @utils.services('compute', 'network')
+ def test_qos_policy_update_on_bound_port_additional_rule(self):
+ min_kbps = 1000
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, 0)
+
+ min_kbps2 = 2000
+ min_kpps2 = 50
+ policy2 = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps2, min_kpps2)
+
+ port = self._create_port_with_qos_policy(policy=policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, min_kbps, 0)
+
+ self.ports_client.update_port(
+ port['id'],
+ qos_policy_id=policy2['id'])
+
+        # FIXME(gibi): Agree in the spec: do we ignore the pps request or do
+        # we reject the update? The current implementation seems to ignore
+        # the additional pps rule.
+ self.assert_allocations(server, port, min_kbps2, 0)
+
+ @decorators.idempotent_id('fbbb9c81-ed21-48c3-bdba-ce2361e93aad')
+ @utils.services('compute', 'network')
+ def test_qos_policy_update_on_bound_port_to_null_policy(self):
+ min_kbps = 1000
+ min_kpps = 100
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+
+ port = self._create_port_with_qos_policy(policy=policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ self.ports_client.update_port(
+ port['id'],
+ qos_policy_id=None)
+
+ self.assert_allocations(server, port, 0, 0)
+
+ @decorators.idempotent_id('0393d038-03ad-4844-a0e4-83010f69dabb')
+ @utils.services('compute', 'network')
+ def test_interface_attach_detach(self):
+ min_kbps = 1000
+ min_kpps = 100
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+
+ port = self._create_port_with_qos_policy(policy=None)
+
+ port2 = self._create_port_with_qos_policy(policy=policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, 0, 0)
+
+ self.interface_client.create_interface(
+ server_id=server['id'],
+ port_id=port2['id'])
+ waiters.wait_for_interface_status(
+ self.interface_client, server['id'], port2['id'], 'ACTIVE')
+
+ self.assert_allocations(server, port2, min_kbps, min_kpps)
+
+ req_id = self.interface_client.delete_interface(
+ server_id=server['id'],
+ port_id=port2['id']).response['x-openstack-request-id']
+ waiters.wait_for_interface_detach(
+ self.servers_client, server['id'], port2['id'], req_id)
+
+ self.assert_allocations(server, port2, 0, 0)
+
+ @decorators.idempotent_id('36ffdb85-6cc2-4cc9-a426-cad5bac8626b')
+ @testtools.skipUnless(
+ CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode tests.')
+ @testtools.skipUnless(
+ CONF.compute_feature_enabled.live_migration,
+        'Live migration is not available.')
+ @utils.services('compute', 'network')
+ def test_server_live_migrate(self):
+ min_kbps = 1000
+ min_kpps = 100
+ policy = self._create_qos_policy_with_bw_and_pps_rules(
+ min_kbps, min_kpps)
+
+ port = self._create_port_with_qos_policy(policy=policy)
+
+ server = self.create_server(
+ networks=[{'port': port['id']}],
+ wait_until='ACTIVE'
+ )
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
+
+ server_details = self.os_adm.servers_client.show_server(server['id'])
+ source_host = server_details['server']['OS-EXT-SRV-ATTR:host']
+
+ self.os_adm.servers_client.live_migrate_server(
+ server['id'], block_migration=True, host=None)
+ waiters.wait_for_server_status(
+ self.servers_client, server['id'], 'ACTIVE')
+
+ server_details = self.os_adm.servers_client.show_server(server['id'])
+ new_host = server_details['server']['OS-EXT-SRV-ATTR:host']
+
+ self.assertNotEqual(source_host, new_host, "Live migration failed")
+
+ self.assert_allocations(server, port, min_kbps, min_kpps)
diff --git a/tempest/test.py b/tempest/test.py
index 8ea3b16..dba2695 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -26,7 +26,7 @@
from tempest.common import credentials_factory as credentials
from tempest.common import utils
from tempest import config
-from tempest.lib import base as lib_base
+from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import fixed_network
from tempest.lib.common import profiler
from tempest.lib.common import validation_resources as vr
@@ -141,19 +141,6 @@
# It should never be overridden by descendants
if hasattr(super(BaseTestCase, cls), 'setUpClass'):
super(BaseTestCase, cls).setUpClass()
- # All the configuration checks that may generate a skip
- # TODO(gmann): cls.handle_skip_exception is really workaround for
- # testtools bug- https://github.com/testing-cabal/testtools/issues/272
- # stestr which is used by Tempest internally to run the test switch
- # the customize test runner(which use stdlib unittest) for >=py3.5
- # else testtools.run.- https://github.com/mtreinish/stestr/pull/265
- # These two test runner are not compatible due to skip exception
- # handling(due to unittest2). testtools.run treat unittestt.SkipTest
- # as error and stdlib unittest treat unittest2.case.SkipTest raised
- # by testtools.TestCase.skipException.
- # The below workaround can be removed once testtools fix issue# 272.
- orig_skip_exception = testtools.TestCase.skipException
- lib_base._handle_skip_exception()
try:
cls.skip_checks()
@@ -181,8 +168,6 @@
raise value.with_traceback(trace)
finally:
del trace # to avoid circular refs
- finally:
- testtools.TestCase.skipException = orig_skip_exception
@classmethod
def tearDownClass(cls):
@@ -480,6 +465,34 @@
pass
@classmethod
+ def setup_api_microversion_fixture(
+ cls, compute_microversion=None, volume_microversion=None,
+ placement_microversion=None):
+ """Set up api microversion fixture on service clients.
+
+        `setup_api_microversion_fixture` is used to set the API microversion
+        on service clients. It can be invoked from the resource_setup()
+        method.
+
+ Example::
+
+ @classmethod
+ def resource_setup(cls):
+ super(MyTest, cls).resource_setup()
+ cls.setup_api_microversion_fixture(
+ compute_microversion=cls.compute_request_microversion,
+ volume_microversion=cls.volume_request_microversion,
+ placement_microversion=cls.placement_request_microversion)
+
+ """
+
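+        # The fixture is set up directly (not via useFixture) because this
+        # runs at class scope; its reset is registered as a class resource
+        # cleanup so the microversions are cleared during resource_cleanup().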
+ api_fixture = api_microversion_fixture.APIMicroversionFixture(
+ compute_microversion=compute_microversion,
+ volume_microversion=volume_microversion,
+ placement_microversion=placement_microversion)
+ api_fixture.setUp()
+ cls.addClassResourceCleanup(api_fixture._reset_microversion)
+
+ @classmethod
def resource_setup(cls):
"""Class level resource setup for test cases.
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 5816ab1..a19f20b 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -13,15 +13,10 @@
# under the License.
import os
-import sys
+import unittest
from tempest.test_discover import plugins
-if sys.version_info >= (2, 7):
- import unittest
-else:
- import unittest2 as unittest
-
def load_tests(loader, tests, pattern):
ext_plugins = plugins.TempestTestPluginManager()
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 5cdbfbf..5b0acfa 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -186,69 +186,96 @@
mock.call('server_id', 'port_id')])
sleep.assert_called_once_with(client.build_interval)
- one_interface = {'interfaceAttachments': [{'port_id': 'port_one'}]}
- two_interfaces = {'interfaceAttachments': [{'port_id': 'port_one'},
- {'port_id': 'port_two'}]}
-
def test_wait_for_interface_detach(self):
- list_interfaces = mock.MagicMock(
- side_effect=[self.two_interfaces, self.one_interface])
- client = self.mock_client(list_interfaces=list_interfaces)
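+        # The detach waiter now polls show_instance_action() for the detach
+        # request id and returns once the compute_detach_interface event
+        # reports a result.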
+ no_event = {
+ 'instanceAction': {
+ 'events': []
+ }
+ }
+ one_event_without_result = {
+ 'instanceAction': {
+ 'events': [
+ {
+ 'event': 'compute_detach_interface',
+ 'result': None
+ }
+
+ ]
+ }
+ }
+ one_event_successful = {
+ 'instanceAction': {
+ 'events': [
+ {
+ 'event': 'compute_detach_interface',
+ 'result': 'Success'
+ }
+ ]
+ }
+ }
+
+ show_instance_action = mock.MagicMock(
+ # there is an extra call to return the result from the waiter
+ side_effect=[
+ no_event,
+ one_event_without_result,
+ one_event_successful,
+ one_event_successful,
+ ]
+ )
+ client = self.mock_client(show_instance_action=show_instance_action)
self.patch('time.time', return_value=0.)
sleep = self.patch('time.sleep')
result = waiters.wait_for_interface_detach(
- client, 'server_id', 'port_two')
+ client, mock.sentinel.server_id, mock.sentinel.port_id,
+ mock.sentinel.detach_request_id
+ )
- self.assertIs(self.one_interface['interfaceAttachments'], result)
- list_interfaces.assert_has_calls([mock.call('server_id'),
- mock.call('server_id')])
- sleep.assert_called_once_with(client.build_interval)
+ self.assertIs(one_event_successful['instanceAction'], result)
+ show_instance_action.assert_has_calls(
+ # there is an extra call to return the result from the waiter
+ [
+ mock.call(
+ mock.sentinel.server_id, mock.sentinel.detach_request_id)
+ ] * 4
+ )
+ sleep.assert_has_calls([mock.call(client.build_interval)] * 2)
def test_wait_for_interface_detach_timeout(self):
- list_interfaces = mock.MagicMock(return_value=self.one_interface)
- client = self.mock_client(list_interfaces=list_interfaces)
+ one_event_without_result = {
+ 'instanceAction': {
+ 'events': [
+ {
+ 'event': 'compute_detach_interface',
+ 'result': None
+ }
+
+ ]
+ }
+ }
+
+ show_instance_action = mock.MagicMock(
+ return_value=one_event_without_result)
+ client = self.mock_client(show_instance_action=show_instance_action)
self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
sleep = self.patch('time.sleep')
- self.assertRaises(lib_exc.TimeoutException,
- waiters.wait_for_interface_detach,
- client, 'server_id', 'port_one')
+ self.assertRaises(
+ lib_exc.TimeoutException,
+ waiters.wait_for_interface_detach,
+ client, mock.sentinel.server_id, mock.sentinel.port_id,
+ mock.sentinel.detach_request_id
+ )
- list_interfaces.assert_has_calls([mock.call('server_id'),
- mock.call('server_id')])
+ show_instance_action.assert_has_calls(
+ [
+ mock.call(
+ mock.sentinel.server_id, mock.sentinel.detach_request_id)
+ ] * 2
+ )
sleep.assert_called_once_with(client.build_interval)
- def test_wait_for_guest_os_boot(self):
- get_console_output = mock.Mock(
- side_effect=[
- {'output': 'os not ready yet\n'},
- {'output': 'login:\n'}
- ])
- client = self.mock_client(get_console_output=get_console_output)
- self.patch('time.time', return_value=0.)
- sleep = self.patch('time.sleep')
-
- with mock.patch.object(waiters.LOG, "info") as log_info:
- waiters.wait_for_guest_os_boot(client, 'server_id')
-
- get_console_output.assert_has_calls([
- mock.call('server_id'), mock.call('server_id')])
- sleep.assert_called_once_with(client.build_interval)
- log_info.assert_not_called()
-
- def test_wait_for_guest_os_boot_timeout(self):
- get_console_output = mock.Mock(
- return_value={'output': 'os not ready yet\n'})
- client = self.mock_client(get_console_output=get_console_output)
- self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
- self.patch('time.sleep')
-
- with mock.patch.object(waiters.LOG, "info") as log_info:
- waiters.wait_for_guest_os_boot(client, 'server_id')
-
- log_info.assert_called_once()
-
class TestVolumeWaiters(base.TestCase):
vol_migrating_src_host = {
@@ -495,3 +522,37 @@
# Assert that list_volume_attachments was actually called
mock_list_volume_attachments.assert_called_once_with(
mock.sentinel.server_id)
+
+
+class TestServerFloatingIPWaiters(base.TestCase):
+
+ def test_wait_for_server_floating_ip_associate_timeout(self):
+ mock_server = {'server': {'id': 'fake_uuid', 'addresses': {}}}
+ mock_client = mock.Mock(
+ spec=servers_client.ServersClient,
+ build_timeout=1, build_interval=1,
+ show_server=lambda id: mock_server)
+
+ fake_server = {'id': 'fake-uuid'}
+ fake_fip = {'floating_ip_address': 'fake_address'}
+ self.assertRaises(
+ lib_exc.TimeoutException,
+ waiters.wait_for_server_floating_ip, mock_client, fake_server,
+ fake_fip)
+
+ def test_wait_for_server_floating_ip_disassociate_timeout(self):
+ mock_addresses = {'shared': [{'OS-EXT-IPS:type': 'floating',
+ 'addr': 'fake_address'}]}
+ mock_server = {'server': {'id': 'fake_uuid',
+ 'addresses': mock_addresses}}
+ mock_client = mock.Mock(
+ spec=servers_client.ServersClient,
+ build_timeout=1, build_interval=1,
+ show_server=lambda id: mock_server)
+
+ fake_server = {'id': 'fake-uuid'}
+ fake_fip = {'floating_ip_address': 'fake_address'}
+ self.assertRaises(
+ lib_exc.TimeoutException,
+ waiters.wait_for_server_floating_ip, mock_client, fake_server,
+ fake_fip, wait_for_disassociate=True)
diff --git a/tempest/tests/lib/common/test_rest_client.py b/tempest/tests/lib/common/test_rest_client.py
index c5f6d7a..1dea5f5 100644
--- a/tempest/tests/lib/common/test_rest_client.py
+++ b/tempest/tests/lib/common/test_rest_client.py
@@ -144,11 +144,11 @@
extra_headers=True,
headers=self.headers)
- self.assertDictContainsSubset(
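+        # Compare dict items() views: the <= check asserts the expected
+        # headers are a subset of the returned headers, replacing the
+        # deprecated assertDictContainsSubset.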
+ self.assertLessEqual(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
- 'Accept': 'application/json'},
- return_dict['headers']
+ 'Accept': 'application/json'}.items(),
+ return_dict['headers'].items()
)
def test_get_update_headers(self):
@@ -156,11 +156,11 @@
extra_headers=True,
headers=self.headers)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
- 'Accept': 'application/json'},
- return_dict['headers']
+ 'Accept': 'application/json'}.items(),
+ return_dict['headers'].items()
)
def test_delete_update_headers(self):
@@ -168,11 +168,11 @@
extra_headers=True,
headers=self.headers)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
- 'Accept': 'application/json'},
- return_dict['headers']
+ 'Accept': 'application/json'}.items(),
+ return_dict['headers'].items()
)
def test_patch_update_headers(self):
@@ -180,11 +180,11 @@
extra_headers=True,
headers=self.headers)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
- 'Accept': 'application/json'},
- return_dict['headers']
+ 'Accept': 'application/json'}.items(),
+ return_dict['headers'].items()
)
def test_put_update_headers(self):
@@ -192,11 +192,11 @@
extra_headers=True,
headers=self.headers)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
- 'Accept': 'application/json'},
- return_dict['headers']
+ 'Accept': 'application/json'}.items(),
+ return_dict['headers'].items()
)
def test_head_update_headers(self):
@@ -207,11 +207,11 @@
extra_headers=True,
headers=self.headers)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
- 'Accept': 'application/json'},
- return_dict['headers']
+ 'Accept': 'application/json'}.items(),
+ return_dict['headers'].items()
)
def test_copy_update_headers(self):
@@ -219,11 +219,11 @@
extra_headers=True,
headers=self.headers)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
- 'Accept': 'application/json'},
- return_dict['headers']
+ 'Accept': 'application/json'}.items(),
+ return_dict['headers'].items()
)
diff --git a/tempest/tests/lib/services/image/v2/test_schemas_client.py b/tempest/tests/lib/services/image/v2/test_schemas_client.py
index 4c4b86a..eef5b41 100644
--- a/tempest/tests/lib/services/image/v2/test_schemas_client.py
+++ b/tempest/tests/lib/services/image/v2/test_schemas_client.py
@@ -81,6 +81,14 @@
self.client = schemas_client.SchemasClient(fake_auth,
'image', 'regionOne')
+ def _test_show_schema_members(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_schema,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SHOW_SCHEMA,
+ bytes_body,
+ schema="members")
+
def _test_show_schema(self, bytes_body=False):
self.check_service_client_function(
self.client.show_schema,
@@ -89,6 +97,12 @@
bytes_body,
schema="member")
+ def test_show_schema_members_with_str_body(self):
+ self._test_show_schema_members()
+
+ def test_show_schema_members_with_bytes_body(self):
+ self._test_show_schema_members(bytes_body=True)
+
def test_show_schema_with_str_body(self):
self._test_show_schema()
diff --git a/tempest/tests/lib/services/network/test_floating_ips_port_forwarding_client.py b/tempest/tests/lib/services/network/test_floating_ips_port_forwarding_client.py
new file mode 100644
index 0000000..ce068e9
--- /dev/null
+++ b/tempest/tests/lib/services/network/test_floating_ips_port_forwarding_client.py
@@ -0,0 +1,156 @@
+# Copyright 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.services.network import floating_ips_port_forwarding_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestFloatingIpsPortForwardingClient(base.BaseServiceTest):
+
+ FAKE_PORT_FORWARDING_REQUEST = {
+
+ "port_forwarding": {
+ "protocol": "tcp",
+ "internal_ip_address": "10.0.0.11",
+ "internal_port": 25,
+ "internal_port_id": "1238be08-a2a8-4b8d-addf-fb5e2250e480",
+ "external_port": 2230,
+ "description": "Some description",
+ }
+
+ }
+
+ FAKE_PORT_FORWARDING_RESPONSE = {
+
+ "port_forwarding": {
+ "protocol": "tcp",
+ "internal_ip_address": "10.0.0.12",
+ "internal_port": 26,
+ "internal_port_id": "1238be08-a2a8-4b8d-addf-fb5e2250e480",
+ "external_port": 2130,
+ "description": "Some description",
+ "id": "825ade3c-9760-4880-8080-8fc2dbab9acc"
+ }
+ }
+
+ FAKE_PORT_FORWARDINGS = {
+ "port_forwardings": [
+ FAKE_PORT_FORWARDING_RESPONSE['port_forwarding']
+ ]
+ }
+
+ FAKE_FLOATINGIP_ID = "a6800594-5b7a-4105-8bfe-723b346ce866"
+
+ FAKE_PORT_FORWARDING_ID = "a7800594-5b7a-4105-8bfe-723b346ce866"
+
+ def setUp(self):
+ super(TestFloatingIpsPortForwardingClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.floating_ips_port_forwarding_client = \
+ floating_ips_port_forwarding_client.\
+ FloatingIpsPortForwardingClient(fake_auth,
+ "network",
+ "regionOne")
+
+ def _test_create_port_forwarding(self, bytes_body=False):
+ self.check_service_client_function(
+ self.floating_ips_port_forwarding_client.
+ create_port_forwarding,
+ "tempest.lib.common.rest_client.RestClient.post",
+ self.FAKE_PORT_FORWARDING_RESPONSE,
+ bytes_body,
+ 201,
+ floatingip_id=self.FAKE_FLOATINGIP_ID,
+ **self.FAKE_PORT_FORWARDING_REQUEST)
+
+ def _test_list_port_forwardings(self, bytes_body=False):
+ self.check_service_client_function(
+ self.floating_ips_port_forwarding_client.
+ list_port_forwardings,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_PORT_FORWARDINGS,
+ bytes_body,
+ 200,
+ floatingip_id=self.FAKE_FLOATINGIP_ID)
+
+ def _test_show_port_forwardings(self, bytes_body=False):
+ self.check_service_client_function(
+ self.floating_ips_port_forwarding_client.
+ show_port_forwarding,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_PORT_FORWARDING_RESPONSE,
+ bytes_body,
+ 200,
+ floatingip_id=self.FAKE_FLOATINGIP_ID,
+ port_forwarding_id=self.FAKE_PORT_FORWARDING_ID)
+
+ def _test_delete_port_forwarding(self):
+ self.check_service_client_function(
+ self.floating_ips_port_forwarding_client.
+ delete_port_forwarding,
+ "tempest.lib.common.rest_client.RestClient.delete",
+ {},
+ status=204,
+ floatingip_id=self.FAKE_FLOATINGIP_ID,
+ port_forwarding_id=self.FAKE_PORT_FORWARDING_ID)
+
+ def _test_update_port_forwarding(self, bytes_body=False):
+ update_kwargs = {
+ "internal_port": "27"
+ }
+
+ resp_body = {
+ "port_forwarding": copy.deepcopy(
+ self.FAKE_PORT_FORWARDING_RESPONSE['port_forwarding']
+ )
+ }
+ resp_body["port_forwarding"].update(update_kwargs)
+
+ self.check_service_client_function(
+ self.floating_ips_port_forwarding_client.update_port_forwarding,
+ "tempest.lib.common.rest_client.RestClient.put",
+ resp_body,
+ bytes_body,
+ 200,
+ floatingip_id=self.FAKE_FLOATINGIP_ID,
+ port_forwarding_id=self.FAKE_PORT_FORWARDING_ID,
+ **update_kwargs)
+
+ def test_list_port_forwardings_with_str_body(self):
+ self._test_list_port_forwardings()
+
+ def test_list_port_forwardings_with_bytes_body(self):
+ self._test_list_port_forwardings(bytes_body=True)
+
+ def test_show_port_forwardings_with_str_body(self):
+ self._test_show_port_forwardings()
+
+ def test_show_port_forwardings_with_bytes_body(self):
+ self._test_show_port_forwardings(bytes_body=True)
+
+ def test_create_port_forwarding_with_str_body(self):
+ self._test_create_port_forwarding()
+
+ def test_create_port_forwarding_with_bytes_body(self):
+ self._test_create_port_forwarding(bytes_body=True)
+
+ def test_update_port_forwarding_with_str_body(self):
+ self._test_update_port_forwarding()
+
+ def test_update_port_forwarding_with_bytes_body(self):
+ self._test_update_port_forwarding(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_routers_client.py b/tempest/tests/lib/services/network/test_routers_client.py
index f5dcc7d..20b6853 100644
--- a/tempest/tests/lib/services/network/test_routers_client.py
+++ b/tempest/tests/lib/services/network/test_routers_client.py
@@ -95,6 +95,67 @@
}
}
+ FAKE_ROUTER_ID = "f8a44de0-fc8e-45df-93c7-f79bf3b01c95"
+ FAKE_INTERFACE = {
+ "id": "915a14a6-867b-4af7-83d1-70efceb146f9",
+ "network_id": "91c013e2-d65a-474e-9177-c3e1799ca726",
+ "port_id": "2dc46bcc-d1f2-4077-b99e-91ee28afaff0",
+ "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1",
+ "subnet_ids": [
+ "a2f1f29d-571b-4533-907f-5803ab96ead1"
+ ],
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "tags": ["tag1,tag2"]
+ }
+ FAKE_INTERFACE_KWARGS = {
+ "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1"
+ }
+ FAKE_SHOW_ROUTER = {
+ "router": {
+ "admin_state_up": "true",
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2018-03-19T19:17:04Z",
+ "description": "",
+ "distributed": "false",
+ "external_gateway_info": {
+ "enable_snat": "true",
+ "external_fixed_ips": [
+ {
+ "ip_address": "172.24.4.6",
+ "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
+ },
+ {
+ "ip_address": "2001:db8::9",
+ "subnet_id": "0c56df5d-ace5-46c8-8f4c-45fa4e334d18"
+ }
+ ],
+ "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
+ },
+ "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
+ "ha": "false",
+ "id": "f8a44de0-fc8e-45df-93c7-f79bf3b01c95",
+ "name": "router1",
+ "revision_number": 1,
+ "routes": [
+ {
+ "destination": "179.24.1.0/24",
+ "nexthop": "172.24.3.99"
+ }
+ ],
+ "status": "ACTIVE",
+ "updated_at": "2018-03-19T19:17:22Z",
+ "project_id": "0bd18306d801447bb457a46252d82d13",
+ "tenant_id": "0bd18306d801447bb457a46252d82d13",
+ "service_type_id": "null",
+ "tags": ["tag1,tag2"],
+ "conntrack_helpers": []
+ }
+ }
+
def setUp(self):
super(TestRoutersClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -116,6 +177,15 @@
bytes_body,
name="another_router", admin_state_up="true", status=201)
+ def _test_show_router(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_router,
+ "tempest.lib.common.rest_client.RestClient.get",
+ self.FAKE_SHOW_ROUTER,
+ bytes_body,
+ 200,
+ router_id=self.FAKE_ROUTER_ID)
+
def _test_update_router(self, bytes_body=False):
self.check_service_client_function(
self.client.update_router,
@@ -125,6 +195,24 @@
router_id="8604a0de-7f6b-409a-a47c-a1cc7bc77b2e",
admin_state_up=False)
+ def _test_add_router_interface(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.add_router_interface,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_INTERFACE,
+ bytes_body,
+ router_id=self.FAKE_ROUTER_ID,
+ **self.FAKE_INTERFACE_KWARGS)
+
+ def _test_remove_router_interface(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.remove_router_interface,
+ 'tempest.lib.common.rest_client.RestClient.put',
+ self.FAKE_INTERFACE,
+ bytes_body,
+ router_id=self.FAKE_ROUTER_ID,
+ **self.FAKE_INTERFACE_KWARGS)
+
def test_list_routers_with_str_body(self):
self._test_list_routers()
@@ -148,3 +236,21 @@
def test_update_router_with_bytes_body(self):
self._test_update_router(bytes_body=True)
+
+ def test_show_router_with_str_body(self):
+ self._test_show_router()
+
+ def test_show_router_with_bytes_body(self):
+ self._test_show_router(bytes_body=True)
+
+ def test_add_router_interface_with_str_body(self):
+ self._test_add_router_interface()
+
+ def test_add_router_interface_with_bytes_body(self):
+ self._test_add_router_interface(bytes_body=True)
+
+ def test_remove_router_interface_with_str_body(self):
+ self._test_remove_router_interface()
+
+ def test_remove_router_interface_with_bytes_body(self):
+ self._test_remove_router_interface(bytes_body=True)
diff --git a/tempest/tests/lib/services/network/test_subnetpools_client.py b/tempest/tests/lib/services/network/test_subnetpools_client.py
index 3abb438..2dfa25e 100644
--- a/tempest/tests/lib/services/network/test_subnetpools_client.py
+++ b/tempest/tests/lib/services/network/test_subnetpools_client.py
@@ -26,13 +26,13 @@
"subnetpools": [
{
"min_prefixlen": "64",
- "address_scope_id": None,
+ "address_scope_id": "null",
"default_prefixlen": "64",
"id": "03f761e6-eee0-43fc-a921-8acf64c14988",
"max_prefixlen": "64",
"name": "my-subnet-pool-ipv6",
- "default_quota": None,
- "is_default": False,
+ "default_quota": "null",
+ "is_default": "false",
"project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
"tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
"prefixes": [
@@ -40,19 +40,22 @@
"2001:db8::/63"
],
"ip_version": 6,
- "shared": False,
+ "shared": "false",
"description": "",
- "revision_number": 2
+ "created_at": "2016-03-08T20:19:41",
+ "updated_at": "2016-03-08T20:19:41",
+ "revision_number": 2,
+ "tags": ["tag1,tag2"]
},
{
"min_prefixlen": "24",
- "address_scope_id": None,
+ "address_scope_id": "null",
"default_prefixlen": "25",
"id": "f49a1319-423a-4ee6-ba54-1d95a4f6cc68",
"max_prefixlen": "30",
"name": "my-subnet-pool-ipv4",
- "default_quota": None,
- "is_default": False,
+ "default_quota": "null",
+ "is_default": "false",
"project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
"tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
"prefixes": [
@@ -60,9 +63,12 @@
"192.168.0.0/16"
],
"ip_version": 4,
- "shared": False,
+ "shared": "false",
"description": "",
- "revision_number": 2
+ "created_at": "2016-03-08T20:19:41",
+ "updated_at": "2016-03-08T20:19:41",
+ "revision_number": 2,
+ "tags": ["tag1,tag2"]
}
]
}
diff --git a/tempest/tests/lib/test_base.py b/tempest/tests/lib/test_base.py
index 2c16e1c..de6021c 100644
--- a/tempest/tests/lib/test_base.py
+++ b/tempest/tests/lib/test_base.py
@@ -48,7 +48,7 @@
@classmethod
def setUpClass(cls): # noqa
"""Simulate absence of super() call."""
- cls.orig_skip_exception = cls.skipException
+ pass
def setUp(self):
try:
diff --git a/tempest/tests/test_test.py b/tempest/tests/test_test.py
index 9aeedb3..cbb81e2 100644
--- a/tempest/tests/test_test.py
+++ b/tempest/tests/test_test.py
@@ -14,7 +14,7 @@
# under the License.
import os
-import sys
+import unittest
from unittest import mock
from oslo_config import cfg
@@ -24,6 +24,9 @@
from tempest import config
from tempest.lib.common import validation_resources as vr
from tempest.lib import exceptions as lib_exc
+from tempest.lib.services.compute import base_compute_client
+from tempest.lib.services.placement import base_placement_client
+from tempest.lib.services.volume import base_client as base_volume_client
from tempest import test
from tempest.tests import base
from tempest.tests import fake_config
@@ -31,12 +34,6 @@
from tempest.tests.lib.services import registry_fixture
-if sys.version_info >= (2, 7):
- import unittest
-else:
- import unittest2 as unittest
-
-
class LoggingTestResult(testtools.TestResult):
def __init__(self, log, *args, **kwargs):
@@ -749,3 +746,186 @@
self.test.fixtures_invoked)
found_exc = log[0][1][1]
self.assertIn(expected_exc, str(found_exc))
+
+
+class TestAPIMicroversionTest1(test.BaseTestCase):
+
+ @classmethod
+ def resource_setup(cls):
+ super(TestAPIMicroversionTest1, cls).resource_setup()
+        # Set microversions and check that every test of this class will
+        # have those microversions set on the service clients requesting
+        # service APIs.
+ cls.setup_api_microversion_fixture(
+ compute_microversion='2.30',
+ volume_microversion='3.10',
+ placement_microversion='3.1')
+        # Check the microversion is set during resource_setup()
+ if base_compute_client.COMPUTE_MICROVERSION != '2.30':
+ raise testtools.TestCase.failureException(
+ "Microversion is not set in resource_setup method")
+ if base_volume_client.VOLUME_MICROVERSION != '3.10':
+ raise testtools.TestCase.failureException(
+ "Microversion is not set in resource_setup method")
+ if base_placement_client.PLACEMENT_MICROVERSION != '3.1':
+ raise testtools.TestCase.failureException(
+ "Microversion is not set in resource_setup method")
+
+ @classmethod
+ def resource_cleanup(cls):
+ super(TestAPIMicroversionTest1, cls).resource_cleanup()
+ # Check microversion is reset back to None in resource_cleanup()
+ if base_compute_client.COMPUTE_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not reset to None in resource_cleanup method")
+ if base_volume_client.VOLUME_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not reset to None in resource_cleanup method")
+ if base_placement_client.PLACEMENT_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not reset to None in resource_cleanup method")
+
+ def setUp(self):
+ super(TestAPIMicroversionTest1, self).setUp()
+ # Check microversion is set in setUp method also.
+ self.assertEqual('2.30', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.10', base_volume_client.VOLUME_MICROVERSION)
+ self.assertEqual('3.1', base_placement_client.PLACEMENT_MICROVERSION)
+
+ def tearDown(self):
+ super(TestAPIMicroversionTest1, self).tearDown()
+ # Check microversion is set in tearDown method also.
+ self.assertEqual('2.30', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.10', base_volume_client.VOLUME_MICROVERSION)
+ self.assertEqual('3.1', base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_1(self):
+ self.assertEqual('2.30', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.10', base_volume_client.VOLUME_MICROVERSION)
+ self.assertEqual('3.1', base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_2(self):
+ self.assertEqual('2.30', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.10', base_volume_client.VOLUME_MICROVERSION)
+ self.assertEqual('3.1', base_placement_client.PLACEMENT_MICROVERSION)
+
+
+class TestAPIMicroversionTest2(test.BaseTestCase):
+
+ @classmethod
+ def resource_setup(cls):
+ super(TestAPIMicroversionTest2, cls).resource_setup()
+        # Set microversions different from those set in
+        # TestAPIMicroversionTest1 and check that every test of this class
+        # will have the new microversions set on the service clients
+        # requesting service APIs.
+ cls.setup_api_microversion_fixture(
+ compute_microversion='2.80',
+ volume_microversion='3.80',
+ placement_microversion='3.8')
+        # Check the microversion is set during resource_setup()
+ if base_compute_client.COMPUTE_MICROVERSION != '2.80':
+ raise testtools.TestCase.failureException(
+ "Microversion is not set in resource_setup method")
+ if base_volume_client.VOLUME_MICROVERSION != '3.80':
+ raise testtools.TestCase.failureException(
+ "Microversion is not set in resource_setup method")
+ if base_placement_client.PLACEMENT_MICROVERSION != '3.8':
+ raise testtools.TestCase.failureException(
+ "Microversion is not set in resource_setup method")
+
+ @classmethod
+ def resource_cleanup(cls):
+ super(TestAPIMicroversionTest2, cls).resource_cleanup()
+ # Check microversion is reset back to None in resource_cleanup()
+ if base_compute_client.COMPUTE_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not reset to None in resource_cleanup method")
+ if base_volume_client.VOLUME_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not reset to None in resource_cleanup method")
+ if base_placement_client.PLACEMENT_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not reset to None in resource_cleanup method")
+
+ def setUp(self):
+ super(TestAPIMicroversionTest2, self).setUp()
+ # Check microversion is set in setUp method also.
+ self.assertEqual('2.80', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.80', base_volume_client.VOLUME_MICROVERSION)
+ self.assertEqual('3.8', base_placement_client.PLACEMENT_MICROVERSION)
+
+ def tearDown(self):
+ super(TestAPIMicroversionTest2, self).tearDown()
+ # Check microversion is set in tearDown method also.
+ self.assertEqual('2.80', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.80', base_volume_client.VOLUME_MICROVERSION)
+ self.assertEqual('3.8', base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_1(self):
+ self.assertEqual('2.80', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.80', base_volume_client.VOLUME_MICROVERSION)
+ self.assertEqual('3.8', base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_2(self):
+ self.assertEqual('2.80', base_compute_client.COMPUTE_MICROVERSION)
+ self.assertEqual('3.80', base_volume_client.VOLUME_MICROVERSION)
+ self.assertEqual('3.8', base_placement_client.PLACEMENT_MICROVERSION)
+
+
+class TestAPIMicroversionTest3(test.BaseTestCase):
+
+ @classmethod
+ def resource_setup(cls):
+ super(TestAPIMicroversionTest3, cls).resource_setup()
+        # No microversion is set for this test class, so there should not
+        # be any microversion set on the service clients requesting
+        # service APIs.
+        # Check no microversion is set during resource_setup()
+ if base_compute_client.COMPUTE_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not set in resource_setup method")
+ if base_volume_client.VOLUME_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not set in resource_setup method")
+ if base_placement_client.PLACEMENT_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not set in resource_setup method")
+
+ @classmethod
+ def resource_cleanup(cls):
+ super(TestAPIMicroversionTest3, cls).resource_cleanup()
+ # Check microversion is set to None in resource_cleanup()
+ if base_compute_client.COMPUTE_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not reset to None in resource_cleanup method")
+ if base_volume_client.VOLUME_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not reset to None in resource_cleanup method")
+ if base_placement_client.PLACEMENT_MICROVERSION is not None:
+ raise testtools.TestCase.failureException(
+ "Microversion is not reset to None in resource_cleanup method")
+
+ def setUp(self):
+ super(TestAPIMicroversionTest3, self).setUp()
+ # Check microversion is None in setUp method also.
+ self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+ self.assertIsNone(base_volume_client.VOLUME_MICROVERSION)
+ self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
+
+ def tearDown(self):
+ super(TestAPIMicroversionTest3, self).tearDown()
+ # Check microversion is None in tearDown method also.
+ self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+ self.assertIsNone(base_volume_client.VOLUME_MICROVERSION)
+ self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_1(self):
+ self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+ self.assertIsNone(base_volume_client.VOLUME_MICROVERSION)
+ self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
+
+ def test_2(self):
+ self.assertIsNone(base_compute_client.COMPUTE_MICROVERSION)
+ self.assertIsNone(base_volume_client.VOLUME_MICROVERSION)
+ self.assertIsNone(base_placement_client.PLACEMENT_MICROVERSION)
diff --git a/tox.ini b/tox.ini
index 7e155cd..b07fdaf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = pep8,py36,py38,bashate,pip-check-reqs
+envlist = pep8,py36,py39,bashate,pip-check-reqs
minversion = 3.18.0
skipsdist = True
ignore_basepython_conflict = True
@@ -10,6 +10,7 @@
setenv =
VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
+ OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
@@ -61,7 +62,6 @@
# 'all' includes slow tests
setenv =
{[tempestenv]setenv}
- OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
deps = {[tempestenv]deps}
commands =
find . -type f -name "*.pyc" -delete
@@ -79,7 +79,6 @@
# 'all' includes slow tests
setenv =
{[tempestenv]setenv}
- OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
basepython = {[tempestenv]basepython}
deps = {[tempestenv]deps}
commands =
@@ -93,7 +92,6 @@
# 'all' includes slow tests
setenv =
{[tempestenv]setenv}
- OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:1200}
basepython = {[tempestenv]basepython}
deps = {[tempestenv]deps}
commands =
@@ -347,7 +345,8 @@
# E123 skipped because it is ignored by default in the default pep8
# E129 skipped because it is too limiting when combined with other rules
# W504 skipped because it is overeager and unnecessary
-ignore = E125,E123,E129,W504
+# H405 skipped because it arbitrarily forces docstring "title" lines
+ignore = E125,E123,E129,W504,H405
show-source = True
exclude = .git,.venv,.tox,dist,doc,*egg,build
enable-extensions = H106,H203,H904
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 753b8fe..d35e25d 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -131,6 +131,8 @@
- job:
name: tempest-integrated-compute-centos-8-stream
parent: tempest-integrated-compute
+    # TODO(gmann): Make this job non-voting until bug#1957941 is fixed.
+ voting: false
nodeset: devstack-single-node-centos-8-stream
branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria)).*$
description: |
@@ -296,6 +298,22 @@
TEMPEST_VOLUME_TYPE: volumev2
- job:
+ name: tempest-centos8-stream-fips
+ parent: devstack-tempest
+ description: |
+      Integration testing for a FIPS-enabled CentOS 8 system
+ nodeset: devstack-single-node-centos-8-stream
+ pre-run: playbooks/enable-fips.yaml
+ vars:
+ tox_envlist: full
+ configure_swap_size: 4096
+ devstack_local_conf:
+ test-config:
+ "$TEMPEST_CONFIG":
+ validation:
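+              # NOTE: ecdsa is used here; rsa keys may not be usable when
+              # FIPS is enabled (assumption).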
+ ssh_key_type: 'ecdsa'
+
+- job:
name: tempest-pg-full
parent: tempest-full-py3
description: |
@@ -317,11 +335,15 @@
check:
jobs:
- grenade
+ - grenade-skip-level:
+ voting: false
- tempest-integrated-networking
+ - openstacksdk-functional-devstack
gate:
jobs:
- grenade
- tempest-integrated-networking
+ - openstacksdk-functional-devstack
- project-template:
name: integrated-gate-compute
@@ -332,12 +354,16 @@
run on Nova gate only.
check:
jobs:
+ - grenade-skip-level:
+ voting: false
- tempest-integrated-compute
- tempest-integrated-compute-centos-8-stream
+ - openstacksdk-functional-devstack
gate:
jobs:
- tempest-integrated-compute
- tempest-integrated-compute-centos-8-stream
+ - openstacksdk-functional-devstack
- project-template:
name: integrated-gate-placement
@@ -349,11 +375,15 @@
check:
jobs:
- grenade
+ - grenade-skip-level:
+ voting: false
- tempest-integrated-placement
+ - openstacksdk-functional-devstack
gate:
jobs:
- grenade
- tempest-integrated-placement
+ - openstacksdk-functional-devstack
- project-template:
name: integrated-gate-storage
@@ -365,11 +395,15 @@
check:
jobs:
- grenade
+ - grenade-skip-level:
+ voting: false
- tempest-integrated-storage
+ - openstacksdk-functional-devstack
gate:
jobs:
- grenade
- tempest-integrated-storage
+ - openstacksdk-functional-devstack
- project-template:
name: integrated-gate-object-storage
@@ -382,7 +416,9 @@
jobs:
- grenade
- tempest-integrated-object-storage
+ - openstacksdk-functional-devstack
gate:
jobs:
- grenade
- tempest-integrated-object-storage
+ - openstacksdk-functional-devstack
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 0d6178d..731a72a 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -3,11 +3,15 @@
- check-requirements
- integrated-gate-py3
- openstack-cover-jobs
- - openstack-python3-xena-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
check:
jobs:
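+        # The pep8 and unit test tox jobs are listed explicitly now that
+        # the openstack-python3-xena-jobs template is removed from the
+        # templates list above.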
+ - openstack-tox-pep8
+ - openstack-tox-py36
+ - openstack-tox-py37
+ - openstack-tox-py38
+ - openstack-tox-py39
- tempest-full-parallel:
# Define list of irrelevant files to use everywhere else
irrelevant-files: &tempest-irrelevant-files
@@ -39,8 +43,6 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-full-ussuri-py3:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-train-py3:
- irrelevant-files: *tempest-irrelevant-files
- tempest-multinode-full-py3:
irrelevant-files: *tempest-irrelevant-files
- tempest-tox-plugin-sanity-check:
@@ -122,6 +124,11 @@
irrelevant-files: *tempest-irrelevant-files
gate:
jobs:
+ - openstack-tox-pep8
+ - openstack-tox-py36
+ - openstack-tox-py37
+ - openstack-tox-py38
+ - openstack-tox-py39
- tempest-slow-py3:
irrelevant-files: *tempest-irrelevant-files
- neutron-ovs-grenade-multinode:
@@ -152,7 +159,7 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-pg-full:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3-opensuse15:
+ - tempest-centos8-stream-fips:
irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
@@ -160,7 +167,6 @@
- tempest-full-wallaby-py3
- tempest-full-victoria-py3
- tempest-full-ussuri-py3
- - tempest-full-train-py3
periodic:
jobs:
- tempest-all
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index e682457..5cc0dd0 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -21,12 +21,6 @@
override-checkout: stable/ussuri
- job:
- name: tempest-full-train-py3
- parent: tempest-full-py3
- nodeset: openstack-single-node-bionic
- override-checkout: stable/train
-
-- job:
name: tempest-full-py3
parent: devstack-tempest
# This job version is with swift disabled on py3
@@ -184,3 +178,24 @@
subnode:
devstack_localrc:
USE_PYTHON3: true
+
+- job:
+ name: tempest-full-py3-opensuse15
+ parent: tempest-full-py3
+ nodeset: devstack-single-node-opensuse-15
+ description: |
+ Base integration test with Neutron networking and py36 running
+ on openSUSE Leap 15.x
+ voting: false
+ # This job is not used after stable/xena and can be
+ # removed once stable/xena is EOL.
+ branches:
+ - stable/pike
+ - stable/queens
+ - stable/rocky
+ - stable/stein
+ - stable/train
+ - stable/ussuri
+ - stable/victoria
+ - stable/wallaby
+ - stable/xena
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 051d8b0..5b6b702 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -69,17 +69,10 @@
c-bak: false
- job:
- name: tempest-full-py3-opensuse15
- parent: tempest-full-py3
- nodeset: devstack-single-node-opensuse-15
- description: |
- Base integration test with Neutron networking and py36 running
- on openSUSE Leap 15.x
- voting: false
-
-- job:
name: tempest-full-py3-centos-8-stream
parent: tempest-full-py3
+    # TODO(gmann): Make this job non-voting until bug#1957941 is fixed.
+ voting: false
nodeset: devstack-single-node-centos-8-stream
description: |
Base integration test with Neutron networking and py36 running