Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 1 | # Copyright (c) 2019 Ericsson |
| 2 | # |
| 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may |
| 4 | # not use this file except in compliance with the License. You may obtain |
| 5 | # a copy of the License at |
| 6 | # |
| 7 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | # |
| 9 | # Unless required by applicable law or agreed to in writing, software |
| 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
| 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the |
| 12 | # License for the specific language governing permissions and limitations |
| 13 | # under the License. |
| 14 | |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 15 | import testtools |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 16 | |
| 17 | from tempest.common import utils |
| 18 | from tempest.common import waiters |
| 19 | from tempest import config |
| 20 | from tempest.lib.common.utils import data_utils |
| 21 | from tempest.lib.common.utils import test_utils |
| 22 | from tempest.lib import decorators |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 23 | from tempest.lib import exceptions as lib_exc |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 24 | from tempest.scenario import manager |
| 25 | |
| 26 | |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 27 | CONF = config.CONF |
| 28 | |
| 29 | |
class NetworkQoSPlacementTestBase(manager.NetworkScenarioTest):
    """Base class for Network QoS testing

    Base class for testing Network QoS scenarios involving placement
    resource allocations.
    """
    credentials = ['primary', 'admin']
    # The feature QoS minimum bandwidth allocation in Placement API depends on
    # Granular resource requests to GET /allocation_candidates and Support
    # allocation candidates with nested resource providers features in
    # Placement (see: https://specs.openstack.org/openstack/nova-specs/specs/
    # stein/approved/bandwidth-resource-provider.html#rest-api-impact) and this
    # means that the minimum placement microversion is 1.29
    placement_min_microversion = '1.29'
    placement_max_microversion = 'latest'

    # Nova rejects to boot VM with port which has resource_request field, below
    # microversion 2.72
    compute_min_microversion = '2.72'
    compute_max_microversion = 'latest'

    # QoS minimum bandwidth rule directions
    INGRESS_DIRECTION = 'ingress'
    EGRESS_DIRECTION = 'egress'
    ANY_DIRECTION = 'any'
    # Placement resource classes tracking the bandwidth inventories
    INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
    EGRESS_RESOURCE_CLASS = "NET_BW_EGR_KILOBIT_PER_SEC"

    # For any realistic inventory value (that is inventory != MAX_INT) an
    # allocation candidate request of MAX_INT is expected to be rejected, see:
    # https://github.com/openstack/placement/blob/master/placement/
    # db/constants.py#L16
    PLACEMENT_MAX_INT = 0x7FFFFFFF

    @classmethod
    def setup_clients(cls):
        """Set up the admin/primary service clients used by the tests."""
        super().setup_clients()
        cls.placement_client = cls.os_admin.placement_client
        cls.networks_client = cls.os_admin.networks_client
        cls.subnets_client = cls.os_admin.subnets_client
        cls.ports_client = cls.os_primary.ports_client
        # NOTE: use os_admin consistently; os_adm is only a deprecated
        # alias of os_admin.
        cls.routers_client = cls.os_admin.routers_client
        cls.qos_client = cls.os_admin.qos_client
        cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
        cls.flavors_client = cls.os_admin.flavors_client
        cls.servers_client = cls.os_primary.servers_client

    def _create_flavor_to_resize_to(self):
        """Create (and schedule cleanup of) a flavor for resize tests.

        The new flavor copies the configured reference flavor except for a
        unique name and a one GiB bigger disk, so a resize actually moves
        the server to a different flavor.

        :return: the created flavor dict.
        """
        old_flavor = self.flavors_client.show_flavor(
            CONF.compute.flavor_ref)['flavor']
        new_flavor = self.flavors_client.create_flavor(**{
            'ram': old_flavor['ram'],
            'vcpus': old_flavor['vcpus'],
            'name': old_flavor['name'] + 'extra-%s' % data_utils.rand_int_id(),
            'disk': old_flavor['disk'] + 1
        })['flavor']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.flavors_client.delete_flavor, new_flavor['id'])
        return new_flavor
| 89 | |
class MinBwAllocationPlacementTest(NetworkQoSPlacementTestBase):
    """Scenarios for QoS minimum bandwidth allocations in placement."""

    required_extensions = ['port-resource-request',
                           'qos',
                           'qos-bw-minimum-ingress']

    # 1 kbps is enough to prove that a bandwidth allocation happened without
    # knowing the actual inventory sizes.
    SMALLEST_POSSIBLE_BW = 1
    BANDWIDTH_1 = 1000
    BANDWIDTH_2 = 2000

    @classmethod
    def skip_checks(cls):
        """Skip when no physnet is configured for placement-based QoS."""
        # Zero-argument super() for consistency with the rest of the module.
        super().skip_checks()
        if not CONF.network_feature_enabled.qos_placement_physnet:
            msg = "Skipped as no physnet is available in config for " \
                  "placement based QoS allocation."
            raise cls.skipException(msg)
| 107 | |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 108 | def setUp(self): |
| 109 | super(MinBwAllocationPlacementTest, self).setUp() |
| 110 | self._check_if_allocation_is_possible() |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 111 | |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 112 | def _create_policy_and_min_bw_rule( |
| 113 | self, name_prefix, min_kbps, direction="ingress" |
| 114 | ): |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 115 | policy = self.qos_client.create_qos_policy( |
| 116 | name=data_utils.rand_name(name_prefix), |
| 117 | shared=True)['policy'] |
| 118 | self.addCleanup(test_utils.call_and_ignore_notfound_exc, |
| 119 | self.qos_client.delete_qos_policy, policy['id']) |
| 120 | rule = self.qos_min_bw_client.create_minimum_bandwidth_rule( |
| 121 | policy['id'], |
| 122 | **{ |
| 123 | 'min_kbps': min_kbps, |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 124 | 'direction': direction, |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 125 | })['minimum_bandwidth_rule'] |
| 126 | self.addCleanup( |
| 127 | test_utils.call_and_ignore_notfound_exc, |
| 128 | self.qos_min_bw_client.delete_minimum_bandwidth_rule, policy['id'], |
| 129 | rule['id']) |
| 130 | |
| 131 | return policy |
| 132 | |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 133 | def _create_qos_basic_policies(self): |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 134 | self.qos_policy_valid = self._create_policy_and_min_bw_rule( |
| 135 | name_prefix='test_policy_valid', |
| 136 | min_kbps=self.SMALLEST_POSSIBLE_BW) |
| 137 | self.qos_policy_not_valid = self._create_policy_and_min_bw_rule( |
| 138 | name_prefix='test_policy_not_valid', |
| 139 | min_kbps=self.PLACEMENT_MAX_INT) |
| 140 | |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 141 | def _create_qos_policies_from_life(self): |
| 142 | # For tempest-slow the max bandwidth configured is 1000000, |
| 143 | # https://opendev.org/openstack/tempest/src/branch/master/ |
| 144 | # .zuul.yaml#L416-L420 |
| 145 | self.qos_policy_1 = self._create_policy_and_min_bw_rule( |
| 146 | name_prefix='test_policy_1', |
| 147 | min_kbps=self.BANDWIDTH_1 |
| 148 | ) |
| 149 | self.qos_policy_2 = self._create_policy_and_min_bw_rule( |
| 150 | name_prefix='test_policy_2', |
| 151 | min_kbps=self.BANDWIDTH_2 |
| 152 | ) |
| 153 | |
| 154 | def _create_network_and_qos_policies(self, policy_method): |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 155 | physnet_name = CONF.network_feature_enabled.qos_placement_physnet |
| 156 | base_segm = \ |
| 157 | CONF.network_feature_enabled.provider_net_base_segmentation_id |
| 158 | |
Ghanshyam Mann | 071d154 | 2021-03-24 19:10:47 -0500 | [diff] [blame] | 159 | self.prov_network, _, _ = self.setup_network_subnet_with_router( |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 160 | networks_client=self.networks_client, |
| 161 | routers_client=self.routers_client, |
| 162 | subnets_client=self.subnets_client, |
| 163 | **{ |
| 164 | 'shared': True, |
| 165 | 'provider:network_type': 'vlan', |
| 166 | 'provider:physical_network': physnet_name, |
| 167 | 'provider:segmentation_id': base_segm |
| 168 | }) |
| 169 | |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 170 | policy_method() |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 171 | |
| 172 | def _check_if_allocation_is_possible(self): |
| 173 | alloc_candidates = self.placement_client.list_allocation_candidates( |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 174 | resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS, |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 175 | self.SMALLEST_POSSIBLE_BW)) |
| 176 | if len(alloc_candidates['provider_summaries']) == 0: |
Balazs Gibizer | a5f523b | 2021-08-26 13:38:19 +0200 | [diff] [blame] | 177 | self.fail('No allocation candidates are available for %s:%s' % |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 178 | (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW)) |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 179 | |
| 180 | # Just to be sure check with impossible high (placement max_int), |
| 181 | # allocation |
| 182 | alloc_candidates = self.placement_client.list_allocation_candidates( |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 183 | resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS, |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 184 | self.PLACEMENT_MAX_INT)) |
| 185 | if len(alloc_candidates['provider_summaries']) != 0: |
| 186 | self.fail('For %s:%s there should be no available candidate!' % |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 187 | (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT)) |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 188 | |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 189 | def _boot_vm_with_min_bw(self, qos_policy_id, status='ACTIVE'): |
| 190 | wait_until = (None if status == 'ERROR' else status) |
| 191 | port = self.create_port( |
| 192 | self.prov_network['id'], qos_policy_id=qos_policy_id) |
| 193 | |
| 194 | server = self.create_server(networks=[{'port': port['id']}], |
| 195 | wait_until=wait_until) |
| 196 | waiters.wait_for_server_status( |
Balazs Gibizer | 18accde | 2021-09-29 16:10:49 +0200 | [diff] [blame] | 197 | client=self.servers_client, server_id=server['id'], |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 198 | status=status, ready_wait=False, raise_on_error=False) |
| 199 | return server, port |
| 200 | |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 201 | def _assert_allocation_is_as_expected( |
| 202 | self, consumer, port_ids, min_kbps=SMALLEST_POSSIBLE_BW, |
| 203 | expected_rc=NetworkQoSPlacementTestBase.INGRESS_RESOURCE_CLASS, |
| 204 | ): |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 205 | allocations = self.placement_client.list_allocations( |
| 206 | consumer)['allocations'] |
| 207 | self.assertGreater(len(allocations), 0) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 208 | bw_resource_in_alloc = False |
Balazs Gibizer | 20514ef | 2021-09-15 12:00:20 +0200 | [diff] [blame] | 209 | allocation_rp = None |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 210 | for rp, resources in allocations.items(): |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 211 | if expected_rc in resources['resources']: |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 212 | self.assertEqual( |
| 213 | min_kbps, |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 214 | resources['resources'][expected_rc]) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 215 | bw_resource_in_alloc = True |
| 216 | allocation_rp = rp |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 217 | if min_kbps: |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 218 | self.assertTrue( |
| 219 | bw_resource_in_alloc, |
| 220 | f"expected {min_kbps} bandwidth allocation from {expected_rc} " |
| 221 | f"but instance has allocation {allocations} instead." |
| 222 | ) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 223 | |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 224 | # Check binding_profile of the port is not empty and equals with |
| 225 | # the rp uuid |
| 226 | for port_id in port_ids: |
| 227 | port = self.os_admin.ports_client.show_port(port_id) |
Balazs Gibizer | 20514ef | 2021-09-15 12:00:20 +0200 | [diff] [blame] | 228 | port_binding_alloc = port['port']['binding:profile'][ |
| 229 | 'allocation'] |
| 230 | # NOTE(gibi): the format of the allocation key depends on the |
| 231 | # existence of port-resource-request-groups API extension. |
| 232 | # TODO(gibi): drop the else branch once tempest does not need |
| 233 | # to support Xena release any more. |
| 234 | if utils.is_extension_enabled( |
| 235 | 'port-resource-request-groups', 'network'): |
| 236 | self.assertEqual( |
| 237 | {allocation_rp}, |
| 238 | set(port_binding_alloc.values())) |
| 239 | else: |
| 240 | self.assertEqual(allocation_rp, port_binding_alloc) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 241 | |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 242 | @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8') |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 243 | @utils.services('compute', 'network') |
| 244 | def test_qos_min_bw_allocation_basic(self): |
| 245 | """"Basic scenario with QoS min bw allocation in placement. |
| 246 | |
| 247 | Steps: |
| 248 | * Create prerequisites: |
| 249 | ** VLAN type provider network with subnet. |
| 250 | ** valid QoS policy with minimum bandwidth rule with min_kbps=1 |
| 251 | (This is a simplification to skip the checks in placement for |
| 252 | detecting the resource provider tree and inventories, as if |
| 253 | bandwidth resource is available 1 kbs will be available). |
| 254 | ** invalid QoS policy with minimum bandwidth rule with |
| 255 | min_kbs=max integer from placement (this is a simplification again |
| 256 | to avoid detection of RP tress and inventories, as placement will |
| 257 | reject such big allocation). |
| 258 | * Create port with valid QoS policy, and boot VM with that, it should |
| 259 | pass. |
| 260 | * Create port with invalid QoS policy, and try to boot VM with that, |
| 261 | it should fail. |
| 262 | """ |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 263 | self._create_network_and_qos_policies(self._create_qos_basic_policies) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 264 | server1, valid_port = self._boot_vm_with_min_bw( |
| 265 | qos_policy_id=self.qos_policy_valid['id']) |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 266 | self._assert_allocation_is_as_expected(server1['id'], |
| 267 | [valid_port['id']]) |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 268 | |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 269 | server2, not_valid_port = self._boot_vm_with_min_bw( |
| 270 | self.qos_policy_not_valid['id'], status='ERROR') |
Lajos Katona | c87a06b | 2019-01-04 13:21:48 +0100 | [diff] [blame] | 271 | allocations = self.placement_client.list_allocations(server2['id']) |
| 272 | |
| 273 | self.assertEqual(0, len(allocations['allocations'])) |
| 274 | server2 = self.servers_client.show_server(server2['id']) |
| 275 | self.assertIn('fault', server2['server']) |
| 276 | self.assertIn('No valid host', server2['server']['fault']['message']) |
elajkat | e4f2820 | 2019-10-24 12:56:42 +0200 | [diff] [blame] | 277 | # Check that binding_profile of the port is empty |
| 278 | port = self.os_admin.ports_client.show_port(not_valid_port['id']) |
| 279 | self.assertEqual(0, len(port['port']['binding:profile'])) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 280 | |
| 281 | @decorators.idempotent_id('8a98150c-a506-49a5-96c6-73a5e7b04ada') |
| 282 | @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration, |
| 283 | 'Cold migration is not available.') |
| 284 | @testtools.skipUnless(CONF.compute.min_compute_nodes > 1, |
| 285 | 'Less than 2 compute nodes, skipping multinode ' |
| 286 | 'tests.') |
| 287 | @utils.services('compute', 'network') |
| 288 | def test_migrate_with_qos_min_bw_allocation(self): |
| 289 | """Scenario to migrate VM with QoS min bw allocation in placement |
| 290 | |
| 291 | Boot a VM like in test_qos_min_bw_allocation_basic, do the same |
| 292 | checks, and |
| 293 | * migrate the server |
| 294 | * confirm the resize, if the VM state is VERIFY_RESIZE |
| 295 | * If the VM goes to ACTIVE state check that allocations are as |
| 296 | expected. |
| 297 | """ |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 298 | self._create_network_and_qos_policies(self._create_qos_basic_policies) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 299 | server, valid_port = self._boot_vm_with_min_bw( |
| 300 | qos_policy_id=self.qos_policy_valid['id']) |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 301 | self._assert_allocation_is_as_expected(server['id'], |
| 302 | [valid_port['id']]) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 303 | |
Balazs Gibizer | 18accde | 2021-09-29 16:10:49 +0200 | [diff] [blame] | 304 | self.os_adm.servers_client.migrate_server(server_id=server['id']) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 305 | waiters.wait_for_server_status( |
Balazs Gibizer | 18accde | 2021-09-29 16:10:49 +0200 | [diff] [blame] | 306 | client=self.servers_client, server_id=server['id'], |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 307 | status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 308 | |
| 309 | # TODO(lajoskatona): Check that the allocations are ok for the |
| 310 | # migration? |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 311 | self._assert_allocation_is_as_expected(server['id'], |
| 312 | [valid_port['id']]) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 313 | |
Balazs Gibizer | 18accde | 2021-09-29 16:10:49 +0200 | [diff] [blame] | 314 | self.os_adm.servers_client.confirm_resize_server( |
| 315 | server_id=server['id']) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 316 | waiters.wait_for_server_status( |
Balazs Gibizer | 18accde | 2021-09-29 16:10:49 +0200 | [diff] [blame] | 317 | client=self.servers_client, server_id=server['id'], |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 318 | status='ACTIVE', ready_wait=False, raise_on_error=True) |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 319 | self._assert_allocation_is_as_expected(server['id'], |
| 320 | [valid_port['id']]) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 321 | |
| 322 | @decorators.idempotent_id('c29e7fd3-035d-4993-880f-70819847683f') |
| 323 | @testtools.skipUnless(CONF.compute_feature_enabled.resize, |
| 324 | 'Resize not available.') |
| 325 | @utils.services('compute', 'network') |
| 326 | def test_resize_with_qos_min_bw_allocation(self): |
| 327 | """Scenario to resize VM with QoS min bw allocation in placement. |
| 328 | |
| 329 | Boot a VM like in test_qos_min_bw_allocation_basic, do the same |
| 330 | checks, and |
| 331 | * resize the server with new flavor |
| 332 | * confirm the resize, if the VM state is VERIFY_RESIZE |
| 333 | * If the VM goes to ACTIVE state check that allocations are as |
| 334 | expected. |
| 335 | """ |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 336 | self._create_network_and_qos_policies(self._create_qos_basic_policies) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 337 | server, valid_port = self._boot_vm_with_min_bw( |
| 338 | qos_policy_id=self.qos_policy_valid['id']) |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 339 | self._assert_allocation_is_as_expected(server['id'], |
| 340 | [valid_port['id']]) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 341 | |
Balazs Gibizer | bc0257c | 2021-08-26 16:41:51 +0200 | [diff] [blame] | 342 | new_flavor = self._create_flavor_to_resize_to() |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 343 | |
| 344 | self.servers_client.resize_server( |
| 345 | server_id=server['id'], flavor_ref=new_flavor['id']) |
| 346 | waiters.wait_for_server_status( |
Balazs Gibizer | 18accde | 2021-09-29 16:10:49 +0200 | [diff] [blame] | 347 | client=self.servers_client, server_id=server['id'], |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 348 | status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 349 | |
| 350 | # TODO(lajoskatona): Check that the allocations are ok for the |
| 351 | # migration? |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 352 | self._assert_allocation_is_as_expected(server['id'], |
| 353 | [valid_port['id']]) |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 354 | |
| 355 | self.servers_client.confirm_resize_server(server_id=server['id']) |
| 356 | waiters.wait_for_server_status( |
Balazs Gibizer | 18accde | 2021-09-29 16:10:49 +0200 | [diff] [blame] | 357 | client=self.servers_client, server_id=server['id'], |
elajkat | 064a340 | 2019-10-17 13:18:10 +0200 | [diff] [blame] | 358 | status='ACTIVE', ready_wait=False, raise_on_error=True) |
elajkat | 8b90b26 | 2020-07-15 16:11:56 +0200 | [diff] [blame] | 359 | self._assert_allocation_is_as_expected(server['id'], |
| 360 | [valid_port['id']]) |
| 361 | |
| 362 | @decorators.idempotent_id('79fdaa1c-df62-4738-a0f0-1cff9dc415f6') |
| 363 | @utils.services('compute', 'network') |
| 364 | def test_qos_min_bw_allocation_update_policy(self): |
| 365 | """Test the update of QoS policy on bound port |
| 366 | |
| 367 | Related RFE in neutron: #1882804 |
| 368 | The scenario is the following: |
| 369 | * Have a port with QoS policy and minimum bandwidth rule. |
| 370 | * Boot a VM with the port. |
| 371 | * Update the port with a new policy with different minimum bandwidth |
| 372 | values. |
| 373 | * The allocation on placement side should be according to the new |
| 374 | rules. |
| 375 | """ |
| 376 | if not utils.is_network_feature_enabled('update_port_qos'): |
| 377 | raise self.skipException("update_port_qos feature is not enabled") |
| 378 | |
| 379 | self._create_network_and_qos_policies( |
| 380 | self._create_qos_policies_from_life) |
| 381 | |
| 382 | port = self.create_port( |
| 383 | self.prov_network['id'], qos_policy_id=self.qos_policy_1['id']) |
| 384 | |
| 385 | server1 = self.create_server( |
| 386 | networks=[{'port': port['id']}]) |
| 387 | |
| 388 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], |
| 389 | self.BANDWIDTH_1) |
| 390 | |
| 391 | self.ports_client.update_port( |
| 392 | port['id'], |
| 393 | **{'qos_policy_id': self.qos_policy_2['id']}) |
| 394 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], |
| 395 | self.BANDWIDTH_2) |
| 396 | |
| 397 | # I changed my mind |
| 398 | self.ports_client.update_port( |
| 399 | port['id'], |
| 400 | **{'qos_policy_id': self.qos_policy_1['id']}) |
| 401 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], |
| 402 | self.BANDWIDTH_1) |
| 403 | |
| 404 | # bad request.... |
| 405 | self.qos_policy_not_valid = self._create_policy_and_min_bw_rule( |
| 406 | name_prefix='test_policy_not_valid', |
| 407 | min_kbps=self.PLACEMENT_MAX_INT) |
| 408 | port_orig = self.ports_client.show_port(port['id'])['port'] |
| 409 | self.assertRaises( |
| 410 | lib_exc.Conflict, |
| 411 | self.ports_client.update_port, |
| 412 | port['id'], **{'qos_policy_id': self.qos_policy_not_valid['id']}) |
| 413 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], |
| 414 | self.BANDWIDTH_1) |
| 415 | |
| 416 | port_upd = self.ports_client.show_port(port['id'])['port'] |
| 417 | self.assertEqual(port_orig['qos_policy_id'], |
| 418 | port_upd['qos_policy_id']) |
| 419 | self.assertEqual(self.qos_policy_1['id'], port_upd['qos_policy_id']) |
| 420 | |
| 421 | @decorators.idempotent_id('9cfc3bb8-f433-4c91-87b6-747cadc8958a') |
| 422 | @utils.services('compute', 'network') |
| 423 | def test_qos_min_bw_allocation_update_policy_from_zero(self): |
| 424 | """Test port without QoS policy to have QoS policy |
| 425 | |
| 426 | This scenario checks if updating a port without QoS policy to |
| 427 | have QoS policy with minimum_bandwidth rule succeeds only on |
| 428 | controlplane, but placement allocation remains 0. |
| 429 | """ |
| 430 | if not utils.is_network_feature_enabled('update_port_qos'): |
| 431 | raise self.skipException("update_port_qos feature is not enabled") |
| 432 | |
| 433 | self._create_network_and_qos_policies( |
| 434 | self._create_qos_policies_from_life) |
| 435 | |
| 436 | port = self.create_port(self.prov_network['id']) |
| 437 | |
| 438 | server1 = self.create_server( |
| 439 | networks=[{'port': port['id']}]) |
| 440 | |
| 441 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0) |
| 442 | |
| 443 | self.ports_client.update_port( |
| 444 | port['id'], **{'qos_policy_id': self.qos_policy_2['id']}) |
| 445 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0) |
| 446 | |
| 447 | @decorators.idempotent_id('a9725a70-1d28-4e3b-ae0e-450abc235962') |
| 448 | @utils.services('compute', 'network') |
| 449 | def test_qos_min_bw_allocation_update_policy_to_zero(self): |
| 450 | """Test port with QoS policy to remove QoS policy |
| 451 | |
| 452 | In this scenario port with QoS minimum_bandwidth rule update to |
| 453 | remove QoS policy results in 0 placement allocation. |
| 454 | """ |
| 455 | if not utils.is_network_feature_enabled('update_port_qos'): |
| 456 | raise self.skipException("update_port_qos feature is not enabled") |
| 457 | |
| 458 | self._create_network_and_qos_policies( |
| 459 | self._create_qos_policies_from_life) |
| 460 | |
| 461 | port = self.create_port( |
| 462 | self.prov_network['id'], qos_policy_id=self.qos_policy_1['id']) |
| 463 | |
| 464 | server1 = self.create_server( |
| 465 | networks=[{'port': port['id']}]) |
| 466 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], |
| 467 | self.BANDWIDTH_1) |
| 468 | |
| 469 | self.ports_client.update_port( |
| 470 | port['id'], |
| 471 | **{'qos_policy_id': None}) |
| 472 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], 0) |
| 473 | |
| 474 | @decorators.idempotent_id('756ced7f-6f1a-43e7-a851-2fcfc16f3dd7') |
| 475 | @utils.services('compute', 'network') |
| 476 | def test_qos_min_bw_allocation_update_with_multiple_ports(self): |
| 477 | if not utils.is_network_feature_enabled('update_port_qos'): |
| 478 | raise self.skipException("update_port_qos feature is not enabled") |
| 479 | |
| 480 | self._create_network_and_qos_policies( |
| 481 | self._create_qos_policies_from_life) |
| 482 | |
| 483 | port1 = self.create_port( |
| 484 | self.prov_network['id'], qos_policy_id=self.qos_policy_1['id']) |
| 485 | port2 = self.create_port( |
| 486 | self.prov_network['id'], qos_policy_id=self.qos_policy_2['id']) |
| 487 | |
| 488 | server1 = self.create_server( |
| 489 | networks=[{'port': port1['id']}, {'port': port2['id']}]) |
| 490 | self._assert_allocation_is_as_expected( |
| 491 | server1['id'], [port1['id'], port2['id']], |
| 492 | self.BANDWIDTH_1 + self.BANDWIDTH_2) |
| 493 | |
| 494 | self.ports_client.update_port( |
| 495 | port1['id'], |
| 496 | **{'qos_policy_id': self.qos_policy_2['id']}) |
| 497 | self._assert_allocation_is_as_expected( |
| 498 | server1['id'], [port1['id'], port2['id']], |
| 499 | 2 * self.BANDWIDTH_2) |
| 500 | |
| 501 | @decorators.idempotent_id('0805779e-e03c-44fb-900f-ce97a790653b') |
| 502 | @utils.services('compute', 'network') |
| 503 | def test_empty_update(self): |
| 504 | if not utils.is_network_feature_enabled('update_port_qos'): |
| 505 | raise self.skipException("update_port_qos feature is not enabled") |
| 506 | |
| 507 | self._create_network_and_qos_policies( |
| 508 | self._create_qos_policies_from_life) |
| 509 | |
| 510 | port = self.create_port( |
| 511 | self.prov_network['id'], qos_policy_id=self.qos_policy_1['id']) |
| 512 | |
| 513 | server1 = self.create_server( |
| 514 | networks=[{'port': port['id']}]) |
| 515 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], |
| 516 | self.BANDWIDTH_1) |
| 517 | self.ports_client.update_port( |
| 518 | port['id'], |
| 519 | **{'description': 'foo'}) |
| 520 | self._assert_allocation_is_as_expected(server1['id'], [port['id']], |
| 521 | self.BANDWIDTH_1) |
Balazs Gibizer | f294b0d | 2021-09-29 16:16:44 +0200 | [diff] [blame] | 522 | |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 523 | @decorators.idempotent_id('372b2728-cfed-469a-b5f6-b75779e1ccbe') |
| 524 | @utils.services('compute', 'network') |
| 525 | def test_qos_min_bw_allocation_update_policy_direction_change(self): |
| 526 | """Test QoS min bw direction change on a bound port |
| 527 | |
| 528 | Related RFE in neutron: #1882804 |
| 529 | The scenario is the following: |
| 530 | * Have a port with QoS policy and minimum bandwidth rule with ingress |
| 531 | direction |
| 532 | * Boot a VM with the port. |
| 533 | * Update the port with a new policy to egress direction in |
| 534 | minimum bandwidth rule. |
| 535 | * The allocation on placement side should be according to the new |
| 536 | rules. |
| 537 | """ |
| 538 | if not utils.is_network_feature_enabled('update_port_qos'): |
| 539 | raise self.skipException("update_port_qos feature is not enabled") |
| 540 | |
| 541 | def create_policies(): |
| 542 | self.qos_policy_ingress = self._create_policy_and_min_bw_rule( |
| 543 | name_prefix='test_policy_ingress', |
| 544 | min_kbps=self.BANDWIDTH_1, |
| 545 | direction=self.INGRESS_DIRECTION, |
| 546 | ) |
| 547 | self.qos_policy_egress = self._create_policy_and_min_bw_rule( |
| 548 | name_prefix='test_policy_egress', |
| 549 | min_kbps=self.BANDWIDTH_1, |
| 550 | direction=self.EGRESS_DIRECTION, |
| 551 | ) |
| 552 | |
| 553 | self._create_network_and_qos_policies(create_policies) |
| 554 | |
| 555 | port = self.create_port( |
| 556 | self.prov_network['id'], |
| 557 | qos_policy_id=self.qos_policy_ingress['id']) |
| 558 | |
| 559 | server1 = self.create_server( |
| 560 | networks=[{'port': port['id']}]) |
| 561 | |
| 562 | self._assert_allocation_is_as_expected( |
| 563 | server1['id'], [port['id']], self.BANDWIDTH_1, |
| 564 | expected_rc=self.INGRESS_RESOURCE_CLASS) |
| 565 | |
| 566 | self.ports_client.update_port( |
| 567 | port['id'], |
| 568 | qos_policy_id=self.qos_policy_egress['id']) |
| 569 | |
| 570 | self._assert_allocation_is_as_expected( |
| 571 | server1['id'], [port['id']], self.BANDWIDTH_1, |
| 572 | expected_rc=self.EGRESS_RESOURCE_CLASS) |
| 573 | self._assert_allocation_is_as_expected( |
| 574 | server1['id'], [port['id']], 0, |
| 575 | expected_rc=self.INGRESS_RESOURCE_CLASS) |
| 576 | |
Balazs Gibizer | f294b0d | 2021-09-29 16:16:44 +0200 | [diff] [blame] | 577 | |
class QoSBandwidthAndPacketRateTests(NetworkQoSPlacementTestBase):
    """Scenario tests for combined QoS minimum bandwidth and packet rate.

    Exercises the placement resource allocations created for ports whose
    QoS policy carries both a minimum bandwidth rule and a minimum packet
    rate rule, across the server lifecycle (create/delete, resize,
    migrate, policy updates, interface attach/detach).
    """

    # Placement resource class consumed by QoS minimum packet rate rules.
    PPS_RESOURCE_CLASS = "NET_PACKET_RATE_KILOPACKET_PER_SEC"
| 581 | |
| 582 | @classmethod |
| 583 | def skip_checks(cls): |
| 584 | super().skip_checks() |
| 585 | if not CONF.network_feature_enabled.qos_min_bw_and_pps: |
| 586 | msg = ( |
| 587 | "Skipped as no resource inventories are configured for QoS " |
| 588 | "minimum bandwidth and packet rate testing.") |
| 589 | raise cls.skipException(msg) |
| 590 | |
    @classmethod
    def setup_clients(cls):
        """Expose the admin client for the QoS minimum packet rate API."""
        super().setup_clients()
        cls.qos_min_pps_client = cls.os_admin.qos_min_pps_client
| 595 | |
    def setUp(self):
        super().setUp()
        # Each test works on its own vlan provider network (created with
        # subnet and router by _create_network).
        self.network = self._create_network()
| 599 | |
| 600 | def _create_qos_policy_with_bw_and_pps_rules(self, min_kbps, min_kpps): |
| 601 | policy = self.qos_client.create_qos_policy( |
| 602 | name=data_utils.rand_name(), |
| 603 | shared=True |
| 604 | )['policy'] |
| 605 | self.addCleanup( |
| 606 | test_utils.call_and_ignore_notfound_exc, |
| 607 | self.qos_client.delete_qos_policy, |
| 608 | policy['id'] |
| 609 | ) |
| 610 | |
| 611 | if min_kbps > 0: |
| 612 | bw_rule = self.qos_min_bw_client.create_minimum_bandwidth_rule( |
| 613 | policy['id'], |
| 614 | min_kbps=min_kbps, |
| 615 | direction=self.INGRESS_DIRECTION |
| 616 | )['minimum_bandwidth_rule'] |
| 617 | self.addCleanup( |
| 618 | test_utils.call_and_ignore_notfound_exc, |
| 619 | self.qos_min_bw_client.delete_minimum_bandwidth_rule, |
| 620 | policy['id'], |
| 621 | bw_rule['id'] |
| 622 | ) |
| 623 | |
| 624 | if min_kpps > 0: |
| 625 | pps_rule = self.qos_min_pps_client.create_minimum_packet_rate_rule( |
| 626 | policy['id'], |
| 627 | min_kpps=min_kpps, |
| 628 | direction=self.ANY_DIRECTION |
| 629 | )['minimum_packet_rate_rule'] |
| 630 | self.addCleanup( |
| 631 | test_utils.call_and_ignore_notfound_exc, |
| 632 | self.qos_min_pps_client.delete_minimum_packet_rate_rule, |
| 633 | policy['id'], |
| 634 | pps_rule['id'] |
| 635 | ) |
| 636 | |
| 637 | return policy |
| 638 | |
| 639 | def _create_network(self): |
| 640 | physnet_name = CONF.network_feature_enabled.qos_placement_physnet |
| 641 | base_segm = ( |
| 642 | CONF.network_feature_enabled.provider_net_base_segmentation_id) |
| 643 | |
| 644 | # setup_network_subnet_with_router will add the necessary cleanup calls |
| 645 | network, _, _ = self.setup_network_subnet_with_router( |
| 646 | networks_client=self.networks_client, |
| 647 | routers_client=self.routers_client, |
| 648 | subnets_client=self.subnets_client, |
| 649 | shared=True, |
| 650 | **{ |
| 651 | 'provider:network_type': 'vlan', |
| 652 | 'provider:physical_network': physnet_name, |
| 653 | # +1 to be different from the segmentation_id used in |
| 654 | # MinBwAllocationPlacementTest |
| 655 | 'provider:segmentation_id': int(base_segm) + 1, |
| 656 | } |
| 657 | ) |
| 658 | return network |
| 659 | |
| 660 | def _create_port_with_qos_policy(self, policy): |
| 661 | port = self.ports_client.create_port( |
| 662 | name=data_utils.rand_name(self.__class__.__name__), |
| 663 | network_id=self.network['id'], |
| 664 | qos_policy_id=policy['id'] if policy else None, |
| 665 | )['port'] |
| 666 | self.addCleanup( |
| 667 | test_utils.call_and_ignore_notfound_exc, |
| 668 | self.ports_client.delete_port, port['id'] |
| 669 | ) |
| 670 | return port |
| 671 | |
| 672 | def assert_allocations( |
| 673 | self, server, port, expected_min_kbps, expected_min_kpps |
| 674 | ): |
| 675 | allocations = self.placement_client.list_allocations( |
| 676 | server['id'])['allocations'] |
| 677 | |
| 678 | # one allocation for the flavor related resources on the compute RP |
| 679 | expected_allocation = 1 |
| 680 | # one allocation due to bw rule |
| 681 | if expected_min_kbps > 0: |
| 682 | expected_allocation += 1 |
| 683 | # one allocation due to pps rule |
| 684 | if expected_min_kpps > 0: |
| 685 | expected_allocation += 1 |
| 686 | self.assertEqual(expected_allocation, len(allocations), allocations) |
| 687 | |
| 688 | expected_rp_uuids_in_binding_allocation = set() |
| 689 | |
| 690 | if expected_min_kbps > 0: |
| 691 | bw_rp_allocs = { |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 692 | rp: alloc['resources'][self.INGRESS_RESOURCE_CLASS] |
Balazs Gibizer | f294b0d | 2021-09-29 16:16:44 +0200 | [diff] [blame] | 693 | for rp, alloc in allocations.items() |
Balazs Gibizer | 770dc00 | 2021-09-15 15:58:25 +0200 | [diff] [blame] | 694 | if self.INGRESS_RESOURCE_CLASS in alloc['resources'] |
Balazs Gibizer | f294b0d | 2021-09-29 16:16:44 +0200 | [diff] [blame] | 695 | } |
| 696 | self.assertEqual(1, len(bw_rp_allocs)) |
| 697 | bw_rp, bw_alloc = list(bw_rp_allocs.items())[0] |
| 698 | self.assertEqual(expected_min_kbps, bw_alloc) |
| 699 | expected_rp_uuids_in_binding_allocation.add(bw_rp) |
| 700 | |
| 701 | if expected_min_kpps > 0: |
| 702 | pps_rp_allocs = { |
| 703 | rp: alloc['resources'][self.PPS_RESOURCE_CLASS] |
| 704 | for rp, alloc in allocations.items() |
| 705 | if self.PPS_RESOURCE_CLASS in alloc['resources'] |
| 706 | } |
| 707 | self.assertEqual(1, len(pps_rp_allocs)) |
| 708 | pps_rp, pps_alloc = list(pps_rp_allocs.items())[0] |
| 709 | self.assertEqual(expected_min_kpps, pps_alloc) |
| 710 | expected_rp_uuids_in_binding_allocation.add(pps_rp) |
| 711 | |
| 712 | # Let's check port.binding:profile.allocation points to the two |
| 713 | # provider resource allocated from |
| 714 | port = self.os_admin.ports_client.show_port(port['id']) |
| 715 | port_binding_alloc = port[ |
| 716 | 'port']['binding:profile'].get('allocation', {}) |
| 717 | self.assertEqual( |
| 718 | expected_rp_uuids_in_binding_allocation, |
| 719 | set(port_binding_alloc.values()) |
| 720 | ) |
| 721 | |
| 722 | def assert_no_allocation(self, server, port): |
| 723 | # check that there are no allocations |
| 724 | allocations = self.placement_client.list_allocations( |
| 725 | server['id'])['allocations'] |
| 726 | self.assertEqual(0, len(allocations)) |
| 727 | |
| 728 | # check that binding_profile of the port is empty |
| 729 | port = self.os_admin.ports_client.show_port(port['id']) |
| 730 | self.assertEqual(0, len(port['port']['binding:profile'])) |
| 731 | |
| 732 | @decorators.idempotent_id('93d1a88d-235e-4b7b-b44d-2a17dcf4e213') |
| 733 | @utils.services('compute', 'network') |
| 734 | def test_server_create_delete(self): |
| 735 | min_kbps = 1000 |
| 736 | min_kpps = 100 |
| 737 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 738 | min_kbps, min_kpps) |
| 739 | port = self._create_port_with_qos_policy(policy) |
| 740 | |
| 741 | server = self.create_server( |
| 742 | networks=[{'port': port['id']}], |
| 743 | wait_until='ACTIVE' |
| 744 | ) |
| 745 | |
| 746 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 747 | |
| 748 | self.servers_client.delete_server(server['id']) |
| 749 | waiters.wait_for_server_termination(self.servers_client, server['id']) |
| 750 | |
| 751 | self.assert_no_allocation(server, port) |
| 752 | |
| 753 | def _test_create_server_negative(self, min_kbps=1000, min_kpps=100): |
| 754 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 755 | min_kbps, min_kpps) |
| 756 | port = self._create_port_with_qos_policy(policy) |
| 757 | |
| 758 | server = self.create_server( |
| 759 | networks=[{'port': port['id']}], |
| 760 | wait_until=None) |
| 761 | waiters.wait_for_server_status( |
| 762 | client=self.servers_client, server_id=server['id'], |
| 763 | status='ERROR', ready_wait=False, raise_on_error=False) |
| 764 | |
| 765 | # check that the creation failed with No valid host |
| 766 | server = self.servers_client.show_server(server['id'])['server'] |
| 767 | self.assertIn('fault', server) |
| 768 | self.assertIn('No valid host', server['fault']['message']) |
| 769 | |
| 770 | self.assert_no_allocation(server, port) |
| 771 | |
    @decorators.idempotent_id('915dd2ce-4890-40c8-9db6-f3e04080c6c1')
    @utils.services('compute', 'network')
    def test_server_create_no_valid_host_due_to_bandwidth(self):
        # A bandwidth request of PLACEMENT_MAX_INT cannot be satisfied by
        # any host, so the boot must fail with No valid host.
        self._test_create_server_negative(min_kbps=self.PLACEMENT_MAX_INT)
| 776 | |
    @decorators.idempotent_id('2d4a755e-10b9-4ac0-bef2-3f89de1f150b')
    @utils.services('compute', 'network')
    def test_server_create_no_valid_host_due_to_packet_rate(self):
        # A packet rate request of PLACEMENT_MAX_INT cannot be satisfied by
        # any host, so the boot must fail with No valid host.
        self._test_create_server_negative(min_kpps=self.PLACEMENT_MAX_INT)
| 781 | |
| 782 | @decorators.idempotent_id('69d93e4f-0dfc-4d17-8d84-cc5c3c842cd5') |
| 783 | @testtools.skipUnless( |
| 784 | CONF.compute_feature_enabled.resize, 'Resize not available.') |
| 785 | @utils.services('compute', 'network') |
| 786 | def test_server_resize(self): |
| 787 | min_kbps = 1000 |
| 788 | min_kpps = 100 |
| 789 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 790 | min_kbps, min_kpps) |
| 791 | port = self._create_port_with_qos_policy(policy) |
| 792 | |
| 793 | server = self.create_server( |
| 794 | networks=[{'port': port['id']}], |
| 795 | wait_until='ACTIVE' |
| 796 | ) |
| 797 | |
| 798 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 799 | |
| 800 | new_flavor = self._create_flavor_to_resize_to() |
| 801 | |
| 802 | self.servers_client.resize_server( |
| 803 | server_id=server['id'], flavor_ref=new_flavor['id'] |
| 804 | ) |
| 805 | waiters.wait_for_server_status( |
| 806 | client=self.servers_client, server_id=server['id'], |
| 807 | status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False) |
| 808 | |
| 809 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 810 | |
| 811 | self.servers_client.confirm_resize_server(server_id=server['id']) |
| 812 | waiters.wait_for_server_status( |
| 813 | client=self.servers_client, server_id=server['id'], |
| 814 | status='ACTIVE', ready_wait=False, raise_on_error=True) |
| 815 | |
| 816 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 817 | |
| 818 | @decorators.idempotent_id('d01d4aee-ca06-4e4e-add7-8a47fe0daf96') |
| 819 | @testtools.skipUnless( |
| 820 | CONF.compute_feature_enabled.resize, 'Resize not available.') |
| 821 | @utils.services('compute', 'network') |
| 822 | def test_server_resize_revert(self): |
| 823 | min_kbps = 1000 |
| 824 | min_kpps = 100 |
| 825 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 826 | min_kbps, min_kpps) |
| 827 | port = self._create_port_with_qos_policy(policy) |
| 828 | |
| 829 | server = self.create_server( |
| 830 | networks=[{'port': port['id']}], |
| 831 | wait_until='ACTIVE' |
| 832 | ) |
| 833 | |
| 834 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 835 | |
| 836 | new_flavor = self._create_flavor_to_resize_to() |
| 837 | |
| 838 | self.servers_client.resize_server( |
| 839 | server_id=server['id'], flavor_ref=new_flavor['id'] |
| 840 | ) |
| 841 | waiters.wait_for_server_status( |
| 842 | client=self.servers_client, server_id=server['id'], |
| 843 | status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False) |
| 844 | |
| 845 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 846 | |
| 847 | self.servers_client.revert_resize_server(server_id=server['id']) |
| 848 | waiters.wait_for_server_status( |
| 849 | client=self.servers_client, server_id=server['id'], |
| 850 | status='ACTIVE', ready_wait=False, raise_on_error=True) |
| 851 | |
| 852 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 853 | |
| 854 | @decorators.idempotent_id('bdd0b31c-c8b0-4b7b-b80a-545a46b32abe') |
| 855 | @testtools.skipUnless( |
| 856 | CONF.compute_feature_enabled.cold_migration, |
| 857 | 'Cold migration is not available.') |
| 858 | @testtools.skipUnless( |
| 859 | CONF.compute.min_compute_nodes > 1, |
| 860 | 'Less than 2 compute nodes, skipping multinode tests.') |
| 861 | @utils.services('compute', 'network') |
| 862 | def test_server_migrate(self): |
| 863 | min_kbps = 1000 |
| 864 | min_kpps = 100 |
| 865 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 866 | min_kbps, min_kpps) |
| 867 | port = self._create_port_with_qos_policy(policy) |
| 868 | |
| 869 | server = self.create_server( |
| 870 | networks=[{'port': port['id']}], |
| 871 | wait_until='ACTIVE' |
| 872 | ) |
| 873 | |
| 874 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 875 | |
| 876 | self.os_adm.servers_client.migrate_server(server_id=server['id']) |
| 877 | waiters.wait_for_server_status( |
| 878 | client=self.servers_client, server_id=server['id'], |
| 879 | status='VERIFY_RESIZE', ready_wait=False, raise_on_error=False) |
| 880 | |
| 881 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 882 | |
| 883 | self.os_adm.servers_client.confirm_resize_server( |
| 884 | server_id=server['id']) |
| 885 | waiters.wait_for_server_status( |
| 886 | client=self.servers_client, server_id=server['id'], |
| 887 | status='ACTIVE', ready_wait=False, raise_on_error=True) |
| 888 | |
| 889 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 890 | |
| 891 | @decorators.idempotent_id('fdb260e3-caa5-482d-ac7c-8c22adf3d750') |
| 892 | @utils.services('compute', 'network') |
| 893 | def test_qos_policy_update_on_bound_port(self): |
| 894 | min_kbps = 1000 |
| 895 | min_kpps = 100 |
| 896 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 897 | min_kbps, min_kpps) |
| 898 | |
| 899 | min_kbps2 = 2000 |
| 900 | min_kpps2 = 50 |
| 901 | policy2 = self._create_qos_policy_with_bw_and_pps_rules( |
| 902 | min_kbps2, min_kpps2) |
| 903 | |
| 904 | port = self._create_port_with_qos_policy(policy) |
| 905 | |
| 906 | server = self.create_server( |
| 907 | networks=[{'port': port['id']}], |
| 908 | wait_until='ACTIVE' |
| 909 | ) |
| 910 | |
| 911 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 912 | |
| 913 | self.ports_client.update_port( |
| 914 | port['id'], |
| 915 | qos_policy_id=policy2['id']) |
| 916 | |
| 917 | self.assert_allocations(server, port, min_kbps2, min_kpps2) |
| 918 | |
| 919 | @decorators.idempotent_id('e6a20125-a02e-49f5-bcf6-894305ee3715') |
| 920 | @utils.services('compute', 'network') |
| 921 | def test_qos_policy_update_on_bound_port_from_null_policy(self): |
| 922 | min_kbps = 1000 |
| 923 | min_kpps = 100 |
| 924 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 925 | min_kbps, min_kpps) |
| 926 | |
| 927 | port = self._create_port_with_qos_policy(policy=None) |
| 928 | |
| 929 | server = self.create_server( |
| 930 | networks=[{'port': port['id']}], |
| 931 | wait_until='ACTIVE' |
| 932 | ) |
| 933 | |
| 934 | self.assert_allocations(server, port, 0, 0) |
| 935 | |
| 936 | self.ports_client.update_port( |
| 937 | port['id'], |
| 938 | qos_policy_id=policy['id']) |
| 939 | |
| 940 | # NOTE(gibi): This is unintuitive but it is the expected behavior. |
| 941 | # If there was no policy attached to the port when the server was |
| 942 | # created then neutron still allows adding a policy to the port later |
| 943 | # as this operation was support before placement enforcement was added |
| 944 | # for the qos minimum bandwidth rule. However neutron cannot create |
| 945 | # the placement resource allocation for this port. |
| 946 | self.assert_allocations(server, port, 0, 0) |
| 947 | |
| 948 | @decorators.idempotent_id('f5864761-966c-4e49-b430-ac0044b7d658') |
| 949 | @utils.services('compute', 'network') |
| 950 | def test_qos_policy_update_on_bound_port_additional_rule(self): |
| 951 | min_kbps = 1000 |
| 952 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 953 | min_kbps, 0) |
| 954 | |
| 955 | min_kbps2 = 2000 |
| 956 | min_kpps2 = 50 |
| 957 | policy2 = self._create_qos_policy_with_bw_and_pps_rules( |
| 958 | min_kbps2, min_kpps2) |
| 959 | |
| 960 | port = self._create_port_with_qos_policy(policy=policy) |
| 961 | |
| 962 | server = self.create_server( |
| 963 | networks=[{'port': port['id']}], |
| 964 | wait_until='ACTIVE' |
| 965 | ) |
| 966 | |
| 967 | self.assert_allocations(server, port, min_kbps, 0) |
| 968 | |
| 969 | self.ports_client.update_port( |
| 970 | port['id'], |
| 971 | qos_policy_id=policy2['id']) |
| 972 | |
| 973 | # FIXME(gibi): Agree in the spec: do we ignore the pps request or we |
| 974 | # reject the update? It seems current implementation goes with |
| 975 | # ignoring the additional pps rule. |
| 976 | self.assert_allocations(server, port, min_kbps2, 0) |
| 977 | |
| 978 | @decorators.idempotent_id('fbbb9c81-ed21-48c3-bdba-ce2361e93aad') |
| 979 | @utils.services('compute', 'network') |
| 980 | def test_qos_policy_update_on_bound_port_to_null_policy(self): |
| 981 | min_kbps = 1000 |
| 982 | min_kpps = 100 |
| 983 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 984 | min_kbps, min_kpps) |
| 985 | |
| 986 | port = self._create_port_with_qos_policy(policy=policy) |
| 987 | |
| 988 | server = self.create_server( |
| 989 | networks=[{'port': port['id']}], |
| 990 | wait_until='ACTIVE' |
| 991 | ) |
| 992 | |
| 993 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 994 | |
| 995 | self.ports_client.update_port( |
| 996 | port['id'], |
| 997 | qos_policy_id=None) |
| 998 | |
| 999 | self.assert_allocations(server, port, 0, 0) |
| 1000 | |
| 1001 | @decorators.idempotent_id('0393d038-03ad-4844-a0e4-83010f69dabb') |
| 1002 | @utils.services('compute', 'network') |
| 1003 | def test_interface_attach_detach(self): |
| 1004 | min_kbps = 1000 |
| 1005 | min_kpps = 100 |
| 1006 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 1007 | min_kbps, min_kpps) |
| 1008 | |
| 1009 | port = self._create_port_with_qos_policy(policy=None) |
| 1010 | |
| 1011 | port2 = self._create_port_with_qos_policy(policy=policy) |
| 1012 | |
| 1013 | server = self.create_server( |
| 1014 | networks=[{'port': port['id']}], |
| 1015 | wait_until='ACTIVE' |
| 1016 | ) |
| 1017 | |
| 1018 | self.assert_allocations(server, port, 0, 0) |
| 1019 | |
| 1020 | self.interface_client.create_interface( |
| 1021 | server_id=server['id'], |
| 1022 | port_id=port2['id']) |
| 1023 | waiters.wait_for_interface_status( |
| 1024 | self.interface_client, server['id'], port2['id'], 'ACTIVE') |
| 1025 | |
| 1026 | self.assert_allocations(server, port2, min_kbps, min_kpps) |
| 1027 | |
| 1028 | req_id = self.interface_client.delete_interface( |
| 1029 | server_id=server['id'], |
| 1030 | port_id=port2['id']).response['x-openstack-request-id'] |
| 1031 | waiters.wait_for_interface_detach( |
| 1032 | self.servers_client, server['id'], port2['id'], req_id) |
| 1033 | |
| 1034 | self.assert_allocations(server, port2, 0, 0) |
| 1035 | |
| 1036 | @decorators.idempotent_id('36ffdb85-6cc2-4cc9-a426-cad5bac8626b') |
| 1037 | @testtools.skipUnless( |
| 1038 | CONF.compute.min_compute_nodes > 1, |
| 1039 | 'Less than 2 compute nodes, skipping multinode tests.') |
| 1040 | @testtools.skipUnless( |
| 1041 | CONF.compute_feature_enabled.live_migration, |
| 1042 | 'Live migration not available') |
| 1043 | @utils.services('compute', 'network') |
| 1044 | def test_server_live_migrate(self): |
| 1045 | min_kbps = 1000 |
| 1046 | min_kpps = 100 |
| 1047 | policy = self._create_qos_policy_with_bw_and_pps_rules( |
| 1048 | min_kbps, min_kpps) |
| 1049 | |
| 1050 | port = self._create_port_with_qos_policy(policy=policy) |
| 1051 | |
| 1052 | server = self.create_server( |
| 1053 | networks=[{'port': port['id']}], |
| 1054 | wait_until='ACTIVE' |
| 1055 | ) |
| 1056 | |
| 1057 | self.assert_allocations(server, port, min_kbps, min_kpps) |
| 1058 | |
| 1059 | server_details = self.os_adm.servers_client.show_server(server['id']) |
| 1060 | source_host = server_details['server']['OS-EXT-SRV-ATTR:host'] |
| 1061 | |
| 1062 | self.os_adm.servers_client.live_migrate_server( |
| 1063 | server['id'], block_migration=True, host=None) |
| 1064 | waiters.wait_for_server_status( |
| 1065 | self.servers_client, server['id'], 'ACTIVE') |
| 1066 | |
| 1067 | server_details = self.os_adm.servers_client.show_server(server['id']) |
| 1068 | new_host = server_details['server']['OS-EXT-SRV-ATTR:host'] |
| 1069 | |
| 1070 | self.assertNotEqual(source_host, new_host, "Live migration failed") |
| 1071 | |
| 1072 | self.assert_allocations(server, port, min_kbps, min_kpps) |