Merge "Add related test to Bug #1732428"
diff --git a/.zuul.yaml b/.zuul.yaml
index 5e3f33a..c20f204 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -13,8 +13,6 @@
roles: &base_roles
- zuul: opendev.org/openstack/devstack
vars: &base_vars
- # TODO(gmann): Remove these test skip once nova bug #1882521 is solved
- tempest_black_regex: "(tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume|tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON|tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached)"
devstack_services:
tempest: true
devstack_local_conf:
@@ -95,6 +93,10 @@
branches: ^(?!stable/ocata).*$
description: |
Base integration test with Neutron networking and py27.
+      This job is meant to run only up to stable/train.
+      If you run it on a stable/ussuri or later gate for python2.7
+      coverage, you need to set override-checkout to a stable branch
+      that is less than or equal to stable/train.
       Former names for this job were:
* legacy-tempest-dsvm-neutron-full
* gate-tempest-dsvm-neutron-full-ubuntu-xenial
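
As an illustration of the note above, a hedged sketch of how a consumer could keep python2.7 coverage by pinning the checkout, mirroring the override-checkout pattern used later in this change for tempest-full-victoria-py3 (the job name is hypothetical):

    - job:
        name: tempest-full-py27-train        # hypothetical consumer job
        parent: tempest-full
        # Pin the checkout to a branch that still supports python2.7,
        # i.e. any stable branch up to and including stable/train.
        override-checkout: stable/train
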
@@ -114,7 +116,7 @@
- job:
name: tempest-full-oslo-master
- parent: tempest-full
+ parent: tempest-full-py3
description: |
Integration test using current git of oslo libs.
This ensures that when oslo libs get released that they
@@ -142,9 +144,6 @@
- opendev.org/openstack/oslo.utils
- opendev.org/openstack/oslo.versionedobjects
- opendev.org/openstack/oslo.vmware
- vars:
- devstack_localrc:
- USE_PYTHON3: True
- job:
name: tempest-full-parallel
@@ -201,7 +200,7 @@
branches: ^(?!stable/ocata).*$
description: |
This job runs integration tests for networking. This is subset of
- 'tempest-full' job and run only Neutron and Nova related tests.
+      'tempest-full-py3' job and runs only Neutron and Nova related tests.
This is meant to be run on neutron gate only.
vars:
tox_envlist: integrated-network
@@ -218,11 +217,10 @@
- job:
name: tempest-integrated-compute
parent: devstack-tempest
- nodeset: openstack-single-node-bionic
branches: ^(?!stable/ocata).*$
description: |
This job runs integration tests for compute. This is
- subset of 'tempest-full' job and run Nova, Neutron, Cinder (except backup tests)
+      subset of 'tempest-full-py3' job and runs Nova, Neutron, Cinder (except backup tests)
and Glance related tests. This is meant to be run on Nova gate only.
vars:
tox_envlist: integrated-compute
@@ -244,7 +242,7 @@
branches: ^(?!stable/ocata).*$
description: |
This job runs integration tests for placement. This is
- subset of 'tempest-full' job and run Nova and Neutron
+      subset of 'tempest-full-py3' job and runs Nova and Neutron
related tests. This is meant to be run on Placement gate only.
vars:
tox_envlist: integrated-placement
@@ -265,7 +263,7 @@
branches: ^(?!stable/ocata).*$
description: |
This job runs integration tests for image & block storage. This is
- subset of 'tempest-full' job and run Cinder, Glance, Swift and Nova
+      subset of 'tempest-full-py3' job and runs Cinder, Glance, Swift and Nova
related tests. This is meant to be run on Cinder and Glance gate only.
vars:
tox_envlist: integrated-storage
@@ -281,7 +279,7 @@
branches: ^(?!stable/ocata).*$
description: |
This job runs integration tests for object storage. This is
- subset of 'tempest-full' job and run Swift, Cinder and Glance
+      subset of 'tempest-full-py3' job and runs Swift, Cinder and Glance
related tests. This is meant to be run on Swift gate only.
vars:
tox_envlist: integrated-object-storage
@@ -294,9 +292,6 @@
- job:
name: tempest-full-py3-ipv6
parent: devstack-tempest-ipv6
- # This currently works from stable/pike on.
- # Before stable/pike, legacy version of tempest-full
- # 'legacy-tempest-dsvm-neutron-full' run.
branches: ^(?!stable/ocata).*$
description: |
Base integration test with Neutron networking, IPv6 and py3.
@@ -473,6 +468,11 @@
USE_PYTHON3: true
- job:
+ name: tempest-full-victoria-py3
+ parent: tempest-full-py3
+ override-checkout: stable/victoria
+
+- job:
name: tempest-full-ussuri-py3
parent: tempest-full-py3
nodeset: openstack-single-node-bionic
@@ -542,7 +542,7 @@
- job:
name: tempest-pg-full
- parent: tempest-full
+ parent: tempest-full-py3
description: |
Base integration test with Neutron networking and PostgreSQL.
Former name for this job was legacy-tempest-dsvm-neutron-pg-full.
@@ -550,7 +550,6 @@
devstack_localrc:
ENABLE_FILE_INJECTION: true
DATABASE_TYPE: postgresql
- USE_PYTHON3: True
- project-template:
name: integrated-gate-networking
@@ -672,6 +671,11 @@
- tempest-full-py3-ipv6:
voting: false
irrelevant-files: *tempest-irrelevant-files
+ - glance-multistore-cinder-import:
+ voting: false
+ irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-victoria-py3:
+ irrelevant-files: *tempest-irrelevant-files
- tempest-full-ussuri-py3:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-train-py3:
@@ -720,6 +724,7 @@
voting: false
irrelevant-files: *tempest-irrelevant-files
- neutron-tempest-dvr:
+ voting: false
irrelevant-files: *tempest-irrelevant-files
- interop-tempest-consistency:
irrelevant-files: *tempest-irrelevant-files
@@ -763,6 +768,7 @@
irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
+ - tempest-full-victoria-py3
- tempest-full-ussuri-py3
- tempest-full-train-py3
- tempest-full-stein-py3
diff --git a/doc/source/microversion_testing.rst b/doc/source/microversion_testing.rst
index 5bc0eac..c7004dd 100644
--- a/doc/source/microversion_testing.rst
+++ b/doc/source/microversion_testing.rst
@@ -352,15 +352,15 @@
* `2.37`_
- .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id34
+ .. _2.37: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id35
* `2.39`_
- .. _2.39: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id35
+ .. _2.39: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id36
* `2.41`_
- .. _2.41: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id37
+ .. _2.41: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id38
* `2.42`_
@@ -368,15 +368,15 @@
* `2.47`_
- .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id42
+ .. _2.47: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
* `2.48`_
- .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id43
+ .. _2.48: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
* `2.49`_
- .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id44
+ .. _2.49: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id45
* `2.53`_
@@ -384,15 +384,15 @@
* `2.54`_
- .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id49
+ .. _2.54: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id50
* `2.55`_
- .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id50
+ .. _2.55: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id51
* `2.57`_
- .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id52
+ .. _2.57: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id53
* `2.59`_
@@ -404,19 +404,19 @@
* `2.61`_
- .. _2.61: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id55
+ .. _2.61: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id56
* `2.63`_
- .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id57
+ .. _2.63: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id58
* `2.70`_
- .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id63
+ .. _2.70: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
* `2.71`_
- .. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id64
+ .. _2.71: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#id65
* `2.73`_
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
index 5f72345..4788362 100644
--- a/playbooks/devstack-tempest-ipv6.yaml
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -7,11 +7,6 @@
# We run tests only on one node, regardless how many nodes are in the system
- hosts: tempest
- environment:
- # This enviroment variable is used by the optional tempest-gabbi
- # job provided by the gabbi-tempest plugin. It can be safely ignored
- # if that plugin is not being used.
- GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
roles:
- setup-tempest-run-dir
- setup-tempest-data-dir
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 7ee7411..3b969f2 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -7,11 +7,6 @@
# We run tests only on one node, regardless how many nodes are in the system
- hosts: tempest
- environment:
- # This enviroment variable is used by the optional tempest-gabbi
- # job provided by the gabbi-tempest plugin. It can be safely ignored
- # if that plugin is not being used.
- GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}"
tasks:
- name: Setup Tempest Run Directory
include_role:
@@ -30,9 +25,9 @@
name: tempest-cleanup
vars:
init_saved_state: true
- when:
- - run_tempest_dry_cleanup is defined
- - run_tempest_cleanup is defined
+ when: (run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool) or
+ (run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
+ (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool)
- name: Run Tempest
include_role:
@@ -43,10 +38,9 @@
name: tempest-cleanup
vars:
dry_run: true
- when:
- - run_tempest_dry_cleanup is defined
+ when: run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool
- name: Run tempest cleanup
include_role:
name: tempest-cleanup
- when: run_tempest_cleanup is defined
+ when: run_tempest_cleanup is defined and run_tempest_cleanup | bool
diff --git a/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml b/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml
new file mode 100644
index 0000000..fb84d25
--- /dev/null
+++ b/releasenotes/notes/Remove-test_reboot_server_soft-48fa786f38cd94dc.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+    The test_reboot_server_soft test has been removed because it had been
+    skipped for more than 6 years. Note that the minimum scenario test uses
+    soft reboot and the nova functional tests also cover reboot.
diff --git a/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml b/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml
new file mode 100644
index 0000000..159bbe8
--- /dev/null
+++ b/releasenotes/notes/add-image-alt-ssh-user-config-option-1b775af2f468aa5b.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - A new config option, image_alt_ssh_user, has been added to the
+    validation section to specify the user name used to authenticate to
+    an alternative instance (an instance using image_ref_alt) in tests.
+    By default this is set to root.
+  - A new config option, image_alt_ssh_password, has been added to the
+    validation section to specify the password used to authenticate to
+    an alternative instance (an instance using image_ref_alt) in tests.
+    By default this is set to password.
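
A minimal sketch of how a job might set these new options, assuming the standard devstack_local_conf plumbing used by devstack-based Zuul jobs (the values shown are placeholders, not the defaults):

    vars:
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            validation:
              # Credentials for the alternative image (image_ref_alt);
              # the values below are illustrative only.
              image_alt_ssh_user: cirros
              image_alt_ssh_password: gocubsgo
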
diff --git a/releasenotes/notes/add-keystone-v3-ec2-tests-d959b7d36f0bd7fc.yaml b/releasenotes/notes/add-keystone-v3-ec2-tests-d959b7d36f0bd7fc.yaml
new file mode 100644
index 0000000..ab8d748
--- /dev/null
+++ b/releasenotes/notes/add-keystone-v3-ec2-tests-d959b7d36f0bd7fc.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added missing clients and tests for keystone's v3 EC2 API which already
+ existed for keystone v2.
diff --git a/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml b/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml
new file mode 100644
index 0000000..2cd5af6
--- /dev/null
+++ b/releasenotes/notes/add-show-default-volume-types-api-to-v3-types-client-44b2676f217d78dc.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+    Add the show default volume type API to the v3 types_client library.
+
+ * default_volume_type
diff --git a/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml b/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml
new file mode 100644
index 0000000..1f2d6b9
--- /dev/null
+++ b/releasenotes/notes/network_feature_enabled_available_features-35f9ac5f253e2ca3.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ New config option to ``network-feature-enabled``: ``available_features``.
+ This is a list which can contain features that are not discoverable
+    through the Neutron API, or it can be the special entry ``all``.
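
A hedged example of enabling the new option from a job definition, again assuming the standard devstack_local_conf plumbing (the value shown is illustrative):

    vars:
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            network-feature-enabled:
              # Either the special entry 'all' or a comma-separated list of
              # features that are not discoverable through the Neutron API.
              available_features: all
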
diff --git a/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml b/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml
new file mode 100644
index 0000000..9e6d49a
--- /dev/null
+++ b/releasenotes/notes/new-placement-client-methods-e35c473e29494928.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+    Add ``placement`` API methods for testing the Routed Provider Networks
+    feature. The following API calls are now available to tempest in the new
+    resource_providers_client:
+
+ * GET /resource_providers
+ * GET /resource_providers/{uuid}
+ * GET /resource_providers/{uuid}/inventories
+ * GET /resource_providers/{uuid}/aggregates
diff --git a/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml b/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml
index bb91213..574f6d9 100644
--- a/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml
+++ b/releasenotes/notes/tempest-victoria-release-27000c02edc5a112.yaml
@@ -1,3 +1,4 @@
+---
prelude: |
This release is to tag the Tempest for OpenStack Victoria release.
This release marks the start of Victoria release support in Tempest.
diff --git a/roles/tempest-cleanup/README.rst b/roles/tempest-cleanup/README.rst
index 70719ca..d1fad90 100644
--- a/roles/tempest-cleanup/README.rst
+++ b/roles/tempest-cleanup/README.rst
@@ -31,3 +31,31 @@
When true, tempest cleanup creates a report (./dry_run.json) of the
    resources that would be cleaned up if the role was run with dry_run option
set to false.
+
+.. zuul:rolevar:: run_tempest_fail_if_leaked_resources
+ :default: false
+
+   When true, the role will fail if any leaked resources are detected.
+   The detection is done via the dry_run.json file: if it contains any
+   resources, some must have been leaked. This can also be used to verify
+   that tempest cleanup was successful.
+
+
+Role usage
+----------
+
+The role can also be used to verify that tempest tests don't leak any
+resources, or to check that the 'tempest cleanup' command deleted all leaked
+resources as expected.
+Either way, the role first needs to be run with the init_saved_state variable
+set to true before any tempest tests are executed.
+Then, after the tempest tests have run, this role needs to be run again with
+the role variables set according to the desired outcome (a sketch of a job
+using option 1 follows this list):
+
+1. to verify that tempest tests don't leak any resources,
+   run_tempest_dry_cleanup and run_tempest_fail_if_leaked_resources have to
+   be set to true.
+
+2. to check that the 'tempest cleanup' command deleted all the leaked
+   resources, run_tempest_cleanup and run_tempest_fail_if_leaked_resources
+   have to be set to true.
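
A minimal sketch of option 1 above as a Zuul job built on the devstack-tempest playbook updated earlier in this change (the job name is hypothetical; the variables are the ones documented above):

    - job:
        name: tempest-full-leak-check        # hypothetical job name
        parent: devstack-tempest
        vars:
          # Save the initial resource state before tests run, do a cleanup
          # dry run afterwards, and fail if dry_run.json lists any leaked
          # resources.
          run_tempest_dry_cleanup: true
          run_tempest_fail_if_leaked_resources: true
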
diff --git a/roles/tempest-cleanup/defaults/main.yaml b/roles/tempest-cleanup/defaults/main.yaml
index fc1948a..ce78bdb 100644
--- a/roles/tempest-cleanup/defaults/main.yaml
+++ b/roles/tempest-cleanup/defaults/main.yaml
@@ -1,3 +1,4 @@
devstack_base_dir: /opt/stack
init_saved_state: false
dry_run: false
+run_tempest_fail_if_leaked_resources: false
diff --git a/roles/tempest-cleanup/tasks/dry_run.yaml b/roles/tempest-cleanup/tasks/dry_run.yaml
new file mode 100644
index 0000000..46749ab
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/dry_run.yaml
@@ -0,0 +1,7 @@
+---
+- name: Run tempest cleanup dry-run
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
diff --git a/roles/tempest-cleanup/tasks/dry_run_checker.py b/roles/tempest-cleanup/tasks/dry_run_checker.py
new file mode 100644
index 0000000..9cd9a85
--- /dev/null
+++ b/roles/tempest-cleanup/tasks/dry_run_checker.py
@@ -0,0 +1,71 @@
+# Copyright 2020 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility for content checking of a given dry_run.json file.
+"""
+
+import argparse
+import json
+import sys
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('--is-empty', action="store_true", dest='is_empty',
+ default=False,
+ help="""Are values of a given dry_run.json empty?""")
+ parser.add_argument('--file', dest='file', default=None, metavar='PATH',
+ help="A path to a dry_run.json file.")
+ return parser
+
+
+def parse_arguments():
+ parser = get_parser()
+ args = parser.parse_args()
+ if not args.file:
+ sys.stderr.write('Path to a dry_run.json must be specified.\n')
+ sys.exit(1)
+ return args
+
+
+def load_json(path):
+ """Load json content from file addressed by path."""
+ try:
+ with open(path, 'rb') as json_file:
+ json_data = json.load(json_file)
+ except Exception as ex:
+ sys.exit(ex)
+ return json_data
+
+
+def are_values_empty(dry_run_content):
+ """Return true if values of dry_run.json are empty."""
+ for value in dry_run_content.values():
+ if value:
+ return False
+ return True
+
+
+def main():
+ args = parse_arguments()
+ content = load_json(args.file)
+ if args.is_empty:
+ if not are_values_empty(content):
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/roles/tempest-cleanup/tasks/main.yaml b/roles/tempest-cleanup/tasks/main.yaml
index 5444afc..c1d63f0 100644
--- a/roles/tempest-cleanup/tasks/main.yaml
+++ b/roles/tempest-cleanup/tasks/main.yaml
@@ -12,20 +12,35 @@
- when: dry_run
block:
- - name: Run tempest cleanup dry-run
- become: yes
- become_user: tempest
- command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
- args:
- chdir: "{{ devstack_base_dir }}/tempest"
+ - import_tasks: dry_run.yaml
- name: Cat dry_run.json
command: cat "{{ devstack_base_dir }}/tempest/dry_run.json"
-- name: Run tempest cleanup
- become: yes
- become_user: tempest
- command: tox -evenv-tempest -- tempest cleanup --debug
- args:
- chdir: "{{ devstack_base_dir }}/tempest"
- when: not dry_run and not init_saved_state
+- when:
+ - not dry_run
+ - not init_saved_state
+ block:
+ - name: Run tempest cleanup
+ become: yes
+ become_user: tempest
+ command: tox -evenv-tempest -- tempest cleanup --debug
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
+
+- when:
+ - run_tempest_fail_if_leaked_resources
+ - not init_saved_state
+ block:
+    # Run the dry run again, if we haven't already, to check that no
+    # leftover resources were left behind after the cleanup in the
+    # previous task.
+ - import_tasks: dry_run.yaml
+ when: not dry_run
+
+ - name: Fail if any resources are leaked
+ become: yes
+ become_user: tempest
+ shell: |
+ python3 roles/tempest-cleanup/tasks/dry_run_checker.py --file {{ devstack_base_dir }}/tempest/dry_run.json --is-empty
+ args:
+ chdir: "{{ devstack_base_dir }}/tempest"
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 37f5aec..89152d6 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -94,6 +94,16 @@
# Now boot a server with the copied flavor.
server = self.create_test_server(
wait_until='ACTIVE', flavor=flavor['id'])
+ server = self.servers_client.show_server(server['id'])['server']
+
+        # If 'id' is not in server['flavor'], we can only compare the flavor
+        # details, so save the to-be-deleted flavor's details here for the
+        # flavor comparison after the server resize.
+ if not server['flavor'].get('id'):
+ pre_flavor = {}
+ body = self.flavors_client.show_flavor(flavor['id'])['flavor']
+ for key in ['name', 'ram', 'vcpus', 'disk']:
+ pre_flavor[key] = body[key]
# Delete the flavor we used to boot the instance.
self._flavor_clean_up(flavor['id'])
@@ -110,7 +120,18 @@
'ACTIVE')
server = self.servers_client.show_server(server['id'])['server']
- self.assert_flavor_equal(flavor['id'], server['flavor'])
+ if server['flavor'].get('id'):
+            msg = 'server flavor is not the same as the expected flavor!'
+ self.assertEqual(flavor['id'], server['flavor']['id'], msg)
+ else:
+ self.assertEqual(pre_flavor['name'],
+ server['flavor']['original_name'],
+                             "original_name in server flavor is not the "
+                             "same as the flavor name!")
+ for key in ['ram', 'vcpus', 'disk']:
+                msg = ('attribute %s in server flavor is not the same '
+                       'as in the flavor!' % key)
+ self.assertEqual(pre_flavor[key], server['flavor'][key], msg)
def _test_cold_migrate_server(self, revert=False):
if CONF.compute.min_compute_nodes < 2:
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
new file mode 100644
index 0000000..9340997
--- /dev/null
+++ b/tempest/api/compute/admin/test_volume.py
@@ -0,0 +1,118 @@
+# Copyright 2020 Red Hat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+from tempest.api.compute import base
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class BaseAttachSCSIVolumeTest(base.BaseV2ComputeAdminTest):
+ """Base class for the admin volume tests in this module."""
+ create_default_network = True
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseAttachSCSIVolumeTest, cls).skip_checks()
+ if not CONF.service_available.cinder:
+ skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.prepare_instance_network()
+ super(BaseAttachSCSIVolumeTest, cls).setup_credentials()
+
+ def _create_image_with_custom_property(self, **kwargs):
+ """Wrapper utility that returns the custom image.
+
+ Creates a new image by downloading the default image's bits and
+ uploading them to a new image. Any kwargs are set as image properties
+ on the new image.
+
+        :return: The UUID of the newly created image.
+ """
+ image = self.image_client.show_image(CONF.compute.image_ref)
+ image_data = self.image_client.show_image_file(
+ CONF.compute.image_ref).data
+ image_file = six.BytesIO(image_data)
+ create_dict = {
+ 'container_format': image['container_format'],
+ 'disk_format': image['disk_format'],
+ 'min_disk': image['min_disk'],
+ 'min_ram': image['min_ram'],
+ 'visibility': 'public',
+ }
+ create_dict.update(kwargs)
+ new_image = self.image_client.create_image(**create_dict)
+ self.addCleanup(self.image_client.wait_for_resource_deletion,
+ new_image['id'])
+ self.addCleanup(self.image_client.delete_image, new_image['id'])
+ self.image_client.store_image_file(new_image['id'], image_file)
+
+ return new_image['id']
+
+
+class AttachSCSIVolumeTestJSON(BaseAttachSCSIVolumeTest):
+ """Test attaching scsi volume to server"""
+
+ @decorators.idempotent_id('777e468f-17ca-4da4-b93d-b7dbf56c0494')
+ def test_attach_scsi_disk_with_config_drive(self):
+ """Test the attach/detach volume with config drive/scsi disk
+
+ Enable the config drive, followed by booting an instance
+ from an image with meta properties hw_cdrom: scsi and use
+ virtio-scsi mode with further asserting list volume attachments
+ in instance after attach and detach of the volume.
+ """
+ custom_img = self._create_image_with_custom_property(
+ hw_scsi_model='virtio-scsi',
+ hw_disk_bus='scsi',
+ hw_cdrom_bus='scsi')
+ server = self.create_test_server(image_id=custom_img,
+ config_drive=True,
+ wait_until='ACTIVE')
+
+        # NOTE(lyarwood): self.create_test_server deletes the server
+        # during class level cleanup, so add a server cleanup here to
+        # ensure that the instance is deleted before the created image.
+        # This avoids failures when the rbd backend is used for both
+        # Glance and Nova ephemeral storage. Also wait until the server
+        # is deleted, otherwise the image deletion can start before the
+        # server is deleted.
+ self.addCleanup(waiters.wait_for_server_termination,
+ self.servers_client, server['id'])
+ self.addCleanup(self.servers_client.delete_server, server['id'])
+
+ volume = self.create_volume()
+ attachment = self.attach_volume(server, volume)
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, attachment['volumeId'], 'in-use')
+ volume_after_attach = self.servers_client.list_volume_attachments(
+ server['id'])['volumeAttachments']
+ self.assertEqual(1, len(volume_after_attach),
+ "Failed to attach volume")
+ self.servers_client.detach_volume(
+ server['id'], attachment['volumeId'])
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, attachment['volumeId'], 'available')
+ volume_after_detach = self.servers_client.list_volume_attachments(
+ server['id'])['volumeAttachments']
+ self.assertEqual(0, len(volume_after_detach),
+ "Failed to detach volume")
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 8b847fc..bb0f5ad 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -171,8 +171,11 @@
cls.flavor_ref = CONF.compute.flavor_ref
cls.flavor_ref_alt = CONF.compute.flavor_ref_alt
cls.ssh_user = CONF.validation.image_ssh_user
+ cls.ssh_alt_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_user = CONF.validation.image_ssh_user
+ cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user
cls.image_ssh_password = CONF.validation.image_ssh_password
+ cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password
@classmethod
def is_requested_microversion_compatible(cls, max_version):
@@ -634,6 +637,7 @@
cls.os_admin.availability_zone_client)
cls.admin_flavors_client = cls.os_admin.flavors_client
cls.admin_servers_client = cls.os_admin.servers_client
+ cls.image_client = cls.os_admin.image_client_v2
def create_flavor(self, ram, vcpus, disk, name=None,
is_public='True', **kwargs):
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 59848f6..3c4daf6 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -35,16 +35,16 @@
cls.from_port = 22
cls.to_port = 22
- def setUp(cls):
- super(SecurityGroupRulesTestJSON, cls).setUp()
+ def setUp(self):
+ super(SecurityGroupRulesTestJSON, self).setUp()
- from_port = cls.from_port
- to_port = cls.to_port
+ from_port = self.from_port
+ to_port = self.to_port
group = {}
ip_range = {}
- cls.expected = {
+ self.expected = {
'parent_group_id': None,
- 'ip_protocol': cls.ip_protocol,
+ 'ip_protocol': self.ip_protocol,
'from_port': from_port,
'to_port': to_port,
'ip_range': ip_range,
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index a7e2187..58d4d7d 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -338,7 +338,9 @@
found_devices = [d['tags'][0] for d in md_dict['devices']
if d.get('tags')]
try:
- self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
+ self.assertEqual(
+ sorted(found_devices),
+ sorted(['nic-tag', 'volume-tag']))
return True
except Exception:
return False
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
index 7931ca9..6ebdbdb 100644
--- a/tempest/api/compute/servers/test_novnc.py
+++ b/tempest/api/compute/servers/test_novnc.py
@@ -26,11 +26,6 @@
CONF = config.CONF
-if six.PY2:
- ord_func = ord
-else:
- ord_func = int
-
class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
"""Test novnc console"""
@@ -116,14 +111,14 @@
# single word(4 bytes).
self.assertEqual(
data_length, 4, 'Expected authentication type None.')
- self.assertIn(1, [ord_func(data[i]) for i in (0, 3)],
+ self.assertIn(1, [int(data[i]) for i in (0, 3)],
'Expected authentication type None.')
else:
self.assertGreaterEqual(
len(data), 2, 'Expected authentication type None.')
self.assertIn(
1,
- [ord_func(data[i + 1]) for i in range(ord_func(data[0]))],
+ [int(data[i + 1]) for i in range(int(data[0]))],
'Expected authentication type None.')
# Send to the server that we only support authentication
# type None
@@ -136,7 +131,7 @@
len(data), 4,
'Server did not think security was successful.')
self.assertEqual(
- [ord_func(i) for i in data], [0, 0, 0, 0],
+ [int(i) for i in data], [0, 0, 0, 0],
'Server did not think security was successful.')
# Say to leave the desktop as shared as part of client initialization
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 4db6987..4527aa9 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -160,15 +160,6 @@
"""
self._test_reboot_server('HARD')
- @decorators.skip_because(bug="1014647")
- @decorators.idempotent_id('4640e3ef-a5df-482e-95a1-ceeeb0faa84d')
- def test_reboot_server_soft(self):
- """Test soft rebooting server
-
- The server should be signaled to reboot gracefully.
- """
- self._test_reboot_server('SOFT')
-
@decorators.idempotent_id('1d1c9104-1b0a-11e7-a3d4-fa163e65f5ce')
def test_remove_server_all_security_groups(self):
"""Test removing all security groups from server"""
@@ -237,7 +228,7 @@
# 4.Plain username/password auth, if a password was given.
linux_client = remote_client.RemoteClient(
self.get_server_ip(rebuilt_server, validation_resources),
- self.ssh_user,
+ self.ssh_alt_user,
password,
validation_resources['keypair']['private_key'],
server=rebuilt_server,
@@ -319,7 +310,7 @@
self.os_primary)
linux_client = remote_client.RemoteClient(
self.get_server_ip(server, validation_resources),
- self.ssh_user,
+ self.ssh_alt_user,
password=None,
pkey=validation_resources['keypair']['private_key'],
server=server,
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index a697b95..655909c 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -45,7 +45,7 @@
# Attempt to start a server with a meta-data key that is > 255
# characters
- # Tryset_server_metadata_item a few values
+        # Try to create a server with metadata keys of a few different sizes
for sz in [256, 257, 511, 1023]:
key = "k" * sz
meta = {key: 'data1'}
@@ -86,11 +86,15 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0025fbd6-a4ba-4cde-b8c2-96805dcfdabc')
- def test_wrong_key_passed_in_body(self):
+ def test_set_metadata_invalid_key(self):
"""Test setting server metadata item with wrong key in body
Raise BadRequest if key in uri does not match the key passed in body.
"""
+ if not CONF.compute_feature_enabled.xenapi_apis:
+ raise self.skipException(
+ 'Metadata is read-only on non-Xen-based deployments.')
+
meta = {'testkey': 'testvalue'}
self.assertRaises(lib_exc.BadRequest,
self.client.set_server_metadata_item,
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index ba2adbb..8a05e7a 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -29,6 +29,7 @@
class ServerPersonalityTestJSON(base.BaseV2ComputeTest):
"""Test servers with injected files"""
+ max_microversion = '2.56'
@classmethod
def setup_credentials(cls):
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 5445113..c222893 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -16,6 +16,7 @@
import testtools
from tempest.api.compute import base
+from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
@@ -189,6 +190,7 @@
self._test_stable_device_rescue(server_id, rescue_image_id)
@decorators.idempotent_id('a3772b42-00bf-4310-a90b-1cc6fd3e7eab')
+ @utils.services('volume')
def test_stable_device_rescue_disk_virtio_with_volume_attached(self):
"""Test rescuing server with volume attached
@@ -214,6 +216,13 @@
min_microversion = '2.87'
+ @classmethod
+ def skip_checks(cls):
+ super(ServerBootFromVolumeStableRescueTest, cls).skip_checks()
+ if not CONF.service_available.cinder:
+ skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+
@decorators.attr(type='slow')
@decorators.idempotent_id('48f123cb-922a-4065-8db6-b9a9074a556b')
def test_stable_device_rescue_bfv_blank_volume(self):
diff --git a/tempest/api/identity/admin/v3/test_application_credentials.py b/tempest/api/identity/admin/v3/test_application_credentials.py
index c9cafd8..f5b0356 100644
--- a/tempest/api/identity/admin/v3/test_application_credentials.py
+++ b/tempest/api/identity/admin/v3/test_application_credentials.py
@@ -37,7 +37,7 @@
secret = app_cred['secret']
# Check that the application credential is functional
- token_id, resp = self.non_admin_token.get_token(
+ _, resp = self.non_admin_token.get_token(
app_cred_id=app_cred['id'],
app_cred_secret=secret,
auth_data=True
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index 282343c..5722f0e 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -192,6 +192,7 @@
cls.os_primary.identity_versions_v3_client
cls.non_admin_app_creds_client = \
cls.os_primary.application_credentials_client
+ cls.non_admin_access_rules_client = cls.os_primary.access_rules_client
class BaseIdentityV3AdminTest(BaseIdentityV3Test):
diff --git a/tempest/api/identity/v3/test_access_rules.py b/tempest/api/identity/v3/test_access_rules.py
new file mode 100644
index 0000000..608eb59
--- /dev/null
+++ b/tempest/api/identity/v3/test_access_rules.py
@@ -0,0 +1,84 @@
+# Copyright 2019 SUSE LLC
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+CONF = config.CONF
+
+
+class AccessRulesV3Test(base.BaseIdentityV3Test):
+
+ @classmethod
+ def skip_checks(cls):
+ super(AccessRulesV3Test, cls).skip_checks()
+ if not CONF.identity_feature_enabled.access_rules:
+ raise cls.skipException("Application credential access rules are "
+ "not available in this environment")
+
+ @classmethod
+ def resource_setup(cls):
+ super(AccessRulesV3Test, cls).resource_setup()
+ cls.user_id = cls.os_primary.credentials.user_id
+ cls.project_id = cls.os_primary.credentials.project_id
+
+ def setUp(self):
+ super(AccessRulesV3Test, self).setUp()
+ ac = self.non_admin_app_creds_client
+ access_rules = [
+ {
+ "path": "/v2.1/servers/*/ips",
+ "method": "GET",
+ "service": "compute"
+ }
+ ]
+ self.app_cred = ac.create_application_credential(
+ self.user_id,
+ name=data_utils.rand_name('application_credential'),
+ access_rules=access_rules
+ )['application_credential']
+
+ @decorators.idempotent_id('2354c498-5119-4ba5-9f0d-44f16f78fb0e')
+ def test_list_access_rules(self):
+ ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
+ self.assertEqual(1, len(ar['access_rules']))
+
+ @decorators.idempotent_id('795dd507-ca1e-40e9-ba90-ff0a08689ba4')
+ def test_show_access_rule(self):
+ access_rule_id = self.app_cred['access_rules'][0]['id']
+ self.non_admin_access_rules_client.show_access_rule(
+ self.user_id, access_rule_id)
+
+ @decorators.idempotent_id('278757e9-e193-4bf8-adf2-0b0a229a17d0')
+ def test_delete_access_rule(self):
+ access_rule_id = self.app_cred['access_rules'][0]['id']
+ app_cred_id = self.app_cred['id']
+ self.assertRaises(
+ lib_exc.Forbidden,
+ self.non_admin_access_rules_client.delete_access_rule,
+ self.user_id,
+ access_rule_id)
+ self.non_admin_app_creds_client.delete_application_credential(
+ self.user_id, app_cred_id)
+ ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
+ self.assertEqual(1, len(ar['access_rules']))
+ self.non_admin_access_rules_client.delete_access_rule(
+ self.user_id, access_rule_id)
+ ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
+ self.assertEqual(0, len(ar['access_rules']))
diff --git a/tempest/api/identity/v3/test_application_credentials.py b/tempest/api/identity/v3/test_application_credentials.py
index ef1bbdf..06734aa 100644
--- a/tempest/api/identity/v3/test_application_credentials.py
+++ b/tempest/api/identity/v3/test_application_credentials.py
@@ -19,8 +19,11 @@
from oslo_utils import timeutils
from tempest.api.identity import base
+from tempest import config
from tempest.lib import decorators
+CONF = config.CONF
+
class ApplicationCredentialsV3Test(base.BaseApplicationCredentialsV3Test):
"""Test application credentials"""
@@ -48,7 +51,7 @@
self.assertNotIn('secret', app_cred)
# Check that the application credential is functional
- token_id, resp = self.non_admin_token.get_token(
+ _, resp = self.non_admin_token.get_token(
app_cred_id=app_cred['id'],
app_cred_secret=secret,
auth_data=True
@@ -65,6 +68,24 @@
expires_str = expires_at.isoformat()
self.assertEqual(expires_str, app_cred['expires_at'])
+ @decorators.idempotent_id('529936eb-aa5d-463d-9f79-01c113d3b88f')
+ def test_create_application_credential_access_rules(self):
+ if not CONF.identity_feature_enabled.access_rules:
+ raise self.skipException("Application credential access rules are "
+ "not available in this environment")
+ access_rules = [
+ {
+ "path": "/v2.1/servers/*/ips",
+ "method": "GET",
+ "service": "compute"
+ }
+ ]
+ app_cred = self.create_application_credential(
+ access_rules=access_rules)
+ access_rule_resp = app_cred['access_rules'][0]
+ access_rule_resp.pop('id')
+ self.assertDictEqual(access_rules[0], access_rule_resp)
+
@decorators.idempotent_id('ff0cd457-6224-46e7-b79e-0ada4964a8a6')
def test_list_application_credentials(self):
"""Test listing application credentials"""
diff --git a/tempest/api/identity/v3/test_ec2_credentials.py b/tempest/api/identity/v3/test_ec2_credentials.py
new file mode 100644
index 0000000..a2cbc4a
--- /dev/null
+++ b/tempest/api/identity/v3/test_ec2_credentials.py
@@ -0,0 +1,113 @@
+# Copyright 2020 SUSE LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common import utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+
+class EC2CredentialsTest(base.BaseIdentityV3Test):
+
+ @classmethod
+ def skip_checks(cls):
+ super(EC2CredentialsTest, cls).skip_checks()
+ if not utils.is_extension_enabled('OS-EC2', 'identity'):
+ msg = "OS-EC2 identity extension not enabled."
+ raise cls.skipException(msg)
+
+ @classmethod
+ def resource_setup(cls):
+ super(EC2CredentialsTest, cls).resource_setup()
+ cls.creds = cls.os_primary.credentials
+
+ @decorators.idempotent_id('b0f55a29-54e5-4166-999d-712347e0c920')
+ def test_create_ec2_credential(self):
+ """Create user ec2 credential."""
+ resp = self.non_admin_users_client.create_user_ec2_credential(
+ self.creds.user_id,
+ tenant_id=self.creds.tenant_id)["credential"]
+ access = resp['access']
+ self.addCleanup(
+ self.non_admin_users_client.delete_user_ec2_credential,
+ self.creds.user_id, access)
+ self.assertNotEmpty(resp['access'])
+ self.assertNotEmpty(resp['secret'])
+ self.assertEqual(self.creds.user_id, resp['user_id'])
+ self.assertEqual(self.creds.tenant_id, resp['tenant_id'])
+
+ @decorators.idempotent_id('897813f0-160c-4fdc-aabc-24ee635ce4a9')
+ def test_list_ec2_credentials(self):
+ """Get the list of user ec2 credentials."""
+ created_creds = []
+ # create first ec2 credentials
+ creds1 = self.non_admin_users_client.create_user_ec2_credential(
+ self.creds.user_id,
+ tenant_id=self.creds.tenant_id)["credential"]
+ created_creds.append(creds1['access'])
+ self.addCleanup(
+ self.non_admin_users_client.delete_user_ec2_credential,
+ self.creds.user_id, creds1['access'])
+
+ # create second ec2 credentials
+ creds2 = self.non_admin_users_client.create_user_ec2_credential(
+ self.creds.user_id,
+ tenant_id=self.creds.tenant_id)["credential"]
+ created_creds.append(creds2['access'])
+ self.addCleanup(
+ self.non_admin_users_client.delete_user_ec2_credential,
+ self.creds.user_id, creds2['access'])
+
+ # get the list of user ec2 credentials
+ resp = self.non_admin_users_client.list_user_ec2_credentials(
+ self.creds.user_id)["credentials"]
+ fetched_creds = [cred['access'] for cred in resp]
+ # created credentials should be in a fetched list
+ missing = [cred for cred in created_creds
+ if cred not in fetched_creds]
+ self.assertEmpty(missing,
+ "Failed to find ec2_credentials %s in fetched list" %
+ ', '.join(cred for cred in missing))
+
+ @decorators.idempotent_id('8b8d1010-5958-48df-a6cd-5e3df72e6bcf')
+ def test_show_ec2_credential(self):
+ """Get the definite user ec2 credential."""
+ resp = self.non_admin_users_client.create_user_ec2_credential(
+ self.creds.user_id,
+ tenant_id=self.creds.tenant_id)["credential"]
+ self.addCleanup(
+ self.non_admin_users_client.delete_user_ec2_credential,
+ self.creds.user_id, resp['access'])
+
+ ec2_creds = self.non_admin_users_client.show_user_ec2_credential(
+ self.creds.user_id, resp['access']
+ )["credential"]
+ for key in ['access', 'secret', 'user_id', 'tenant_id']:
+ self.assertEqual(ec2_creds[key], resp[key])
+
+ @decorators.idempotent_id('9408d61b-8be0-4a8d-9b85-14f61edb456b')
+ def test_delete_ec2_credential(self):
+ """Delete user ec2 credential."""
+ resp = self.non_admin_users_client.create_user_ec2_credential(
+ self.creds.user_id,
+ tenant_id=self.creds.tenant_id)["credential"]
+ access = resp['access']
+ self.non_admin_users_client.delete_user_ec2_credential(
+ self.creds.user_id, access)
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.non_admin_users_client.show_user_ec2_credential,
+ self.creds.user_id,
+ access)
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index ae7b3e4..d3dc19a 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -18,6 +18,7 @@
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions
import tempest.test
CONF = config.CONF
@@ -155,6 +156,15 @@
namespace_name)
return namespace
+ @classmethod
+ def get_available_stores(cls):
+ stores = []
+ try:
+ stores = cls.client.info_stores()['stores']
+ except exceptions.NotFound:
+ pass
+ return stores
+
class BaseV2MemberImageTest(BaseV2ImageTest):
diff --git a/tempest/api/image/v2/admin/test_images.py b/tempest/api/image/v2/admin/test_images.py
index 7e13d7f..ad68d82 100644
--- a/tempest/api/image/v2/admin/test_images.py
+++ b/tempest/api/image/v2/admin/test_images.py
@@ -13,10 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import six
+
from tempest.api.image import base
+from tempest.common import waiters
+from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+CONF = config.CONF
+
class BasicOperationsImagesAdminTest(base.BaseV2ImageAdminTest):
""""Test image operations about image owner"""
@@ -52,3 +58,65 @@
self.assertEqual(random_id_2, updated_image_info['owner'])
self.assertNotEqual(created_image_info['owner'],
updated_image_info['owner'])
+
+
+class ImportCopyImagesTest(base.BaseV2ImageAdminTest):
+ """Test the import copy-image operations"""
+
+ @classmethod
+ def skip_checks(cls):
+ super(ImportCopyImagesTest, cls).skip_checks()
+ if not CONF.image_feature_enabled.import_image:
+ skip_msg = (
+ "%s skipped as image import is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+
+ @decorators.idempotent_id('9b3b644e-03d1-11eb-a036-fa163e2eaf49')
+ def test_image_copy_image_import(self):
+ """Test 'copy-image' import functionalities
+
+ Create image, import image with copy-image method and
+ verify that import succeeded.
+ """
+ available_stores = self.get_available_stores()
+ available_import_methods = self.client.info_import()[
+ 'import-methods']['value']
+ # NOTE(gmann): Skip if copy-image import method and multistore
+ # are not available.
+ if ('copy-image' not in available_import_methods or
+ not available_stores):
+ raise self.skipException('Either copy-image import method or '
+ 'multistore is not available')
+ uuid = data_utils.rand_uuid()
+ image_name = data_utils.rand_name('copy-image')
+ container_format = CONF.image.container_formats[0]
+ disk_format = CONF.image.disk_formats[0]
+ image = self.create_image(name=image_name,
+ container_format=container_format,
+ disk_format=disk_format,
+ visibility='private',
+ ramdisk_id=uuid)
+ self.assertEqual('queued', image['status'])
+
+ file_content = data_utils.random_bytes()
+ image_file = six.BytesIO(file_content)
+ self.client.store_image_file(image['id'], image_file)
+
+ body = self.client.show_image(image['id'])
+ self.assertEqual(image['id'], body['id'])
+ self.assertEqual(len(file_content), body.get('size'))
+ self.assertEqual('active', body['status'])
+
+        # Copy the image to all the stores. In the case of an all_stores
+        # request, glance will skip the stores where the image is already
+        # available.
+ self.admin_client.image_import(image['id'], method='copy-image',
+ all_stores=True,
+ all_stores_must_succeed=False)
+
+        # Wait for the copy to finish on all stores.
+ failed_stores = waiters.wait_for_image_copied_to_stores(
+ self.client, image['id'])
+ # Assert if copy is failed on any store.
+        self.assertEqual(0, len(failed_stores),
+                         "Failed to copy the image to the following "
+                         "stores: %s" % str(failed_stores))
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index c1a7211..28299a4 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -20,6 +20,7 @@
from oslo_log import log as logging
from tempest.api.image import base
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -113,6 +114,95 @@
self.client.wait_for_resource_activation(image['id'])
+class MultiStoresImportImagesTest(base.BaseV2ImageTest):
+ """Test importing image in multiple stores"""
+ @classmethod
+ def skip_checks(cls):
+ super(MultiStoresImportImagesTest, cls).skip_checks()
+ if not CONF.image_feature_enabled.import_image:
+ skip_msg = (
+ "%s skipped as image import is not available" % cls.__name__)
+ raise cls.skipException(skip_msg)
+
+ @classmethod
+ def resource_setup(cls):
+ super(MultiStoresImportImagesTest, cls).resource_setup()
+ cls.available_import_methods = cls.client.info_import()[
+ 'import-methods']['value']
+ if not cls.available_import_methods:
+ raise cls.skipException('Server does not support '
+ 'any import method')
+
+        # NOTE(pdeore): Skip if the glance-direct import method and
+        # multistore are not enabled/configured, or if only one store is
+        # configured in a multiple stores setup.
+ cls.available_stores = cls.get_available_stores()
+ if ('glance-direct' not in cls.available_import_methods or
+ not len(cls.available_stores) > 1):
+            raise cls.skipException(
+                'Either the glance-direct import method is not present '
+                'in %s, or no more than one store is configured: %s'
+                % (cls.available_import_methods, cls.available_stores))
+
+ def _create_and_stage_image(self, all_stores=False):
+ """Create Image & stage image file for glance-direct import method."""
+ image_name = data_utils.rand_name('test-image')
+ container_format = CONF.image.container_formats[0]
+ disk_format = CONF.image.disk_formats[0]
+ image = self.create_image(name=image_name,
+ container_format=container_format,
+ disk_format=disk_format,
+ visibility='private')
+ self.assertEqual('queued', image['status'])
+
+ self.client.stage_image_file(
+ image['id'],
+ six.BytesIO(data_utils.random_bytes(10485760)))
+ # Check image status is 'uploading'
+ body = self.client.show_image(image['id'])
+ self.assertEqual(image['id'], body['id'])
+ self.assertEqual('uploading', body['status'])
+
+ if all_stores:
+ stores_list = ','.join([store['id']
+ for store in self.available_stores])
+ else:
+ stores = [store['id'] for store in self.available_stores]
+ stores_list = stores[::len(stores) - 1]
+
+ return body, stores_list
+
+ @decorators.idempotent_id('bf04ff00-3182-47cb-833a-f1c6767b47fd')
+ def test_glance_direct_import_image_to_all_stores(self):
+ """Test image is imported in all available stores
+
+ Create image, import image to all available stores using glance-direct
+ import method and verify that import succeeded.
+ """
+ image, stores = self._create_and_stage_image(all_stores=True)
+
+ self.client.image_import(
+ image['id'], method='glance-direct', all_stores=True)
+
+ waiters.wait_for_image_imported_to_stores(self.client,
+ image['id'], stores)
+
+ @decorators.idempotent_id('82fb131a-dd2b-11ea-aec7-340286b6c574')
+ def test_glance_direct_import_image_to_specific_stores(self):
+        """Test image is imported in specified stores
+
+ Create image, import image to specified store(s) using glance-direct
+ import method and verify that import succeeded.
+ """
+ image, stores = self._create_and_stage_image()
+ self.client.image_import(image['id'], method='glance-direct',
+ stores=stores)
+
+ waiters.wait_for_image_imported_to_stores(self.client, image['id'],
+ (','.join(stores)))
+
+
class BasicOperationsImagesTest(base.BaseV2ImageTest):
"""Here we test the basic operations of images"""
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index c32d3c1..eb31d24 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -66,7 +66,7 @@
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
# Create two ports one each for Creation and Updating of floatingIP
cls.ports = []
- for i in range(2):
+ for _ in range(2):
port = cls.create_port(cls.network)
cls.ports.append(port)
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index c5c30e3..da8ad66 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -28,6 +28,7 @@
class AccountTest(base.BaseObjectTest):
+ """Test account metadata and containers"""
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@@ -54,7 +55,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('3499406a-ae53-4f8c-b43a-133d4dc6fe3f')
def test_list_containers(self):
- # list of all containers should not be empty
+ """Test listing containers"""
resp, container_list = self.account_client.list_account_containers()
self.assertHeaders(resp, 'Account', 'GET')
@@ -66,11 +67,10 @@
@decorators.idempotent_id('884ec421-fbad-4fcc-916b-0580f2699565')
def test_list_no_containers(self):
- # List request to empty account
+        """Test listing containers for an account without containers"""
# To test listing no containers, create new user other than
# the base user of this instance.
-
resp, container_list = \
self.os_operator.account_client.list_account_containers()
@@ -103,7 +103,7 @@
@decorators.idempotent_id('1c7efa35-e8a2-4b0b-b5ff-862c7fd83704')
def test_list_containers_with_format_json(self):
- # list containers setting format parameter to 'json'
+ """Test listing containers setting format parameter to 'json'"""
params = {'format': 'json'}
resp, container_list = self.account_client.list_account_containers(
params=params)
@@ -115,7 +115,7 @@
@decorators.idempotent_id('4477b609-1ca6-4d4b-b25d-ad3f01086089')
def test_list_containers_with_format_xml(self):
- # list containers setting format parameter to 'xml'
+ """Test listing containers setting format parameter to 'xml'"""
params = {'format': 'xml'}
resp, container_list = self.account_client.list_account_containers(
params=params)
@@ -133,13 +133,18 @@
not CONF.object_storage_feature_enabled.discoverability,
'Discoverability function is disabled')
def test_list_extensions(self):
+ """Test listing capabilities"""
resp = self.capabilities_client.list_capabilities()
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
@decorators.idempotent_id('5cfa4ab2-4373-48dd-a41f-a532b12b08b2')
def test_list_containers_with_limit(self):
- # list containers one of them, half of them then all of them
+ """Test listing containers with limit parameter
+
+ Listing containers limited to one of them, half of them, and then all
+ of them.
+ """
for limit in (1, self.containers_count // 2,
self.containers_count):
params = {'limit': limit}
@@ -151,10 +156,11 @@
@decorators.idempotent_id('638f876d-6a43-482a-bbb3-0840bca101c6')
def test_list_containers_with_marker(self):
- # list containers using marker param
- # first expect to get 0 container as we specified last
- # the container as marker
- # second expect to get the bottom half of the containers
+ """Test listing containers with marker parameter
+
+        First expect to get 0 containers as we specified the last container
+        as the marker, second expect to get the bottom half of the containers.
+ """
params = {'marker': self.containers[-1]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
@@ -172,10 +178,11 @@
@decorators.idempotent_id('5ca164e4-7bde-43fa-bafb-913b53b9e786')
def test_list_containers_with_end_marker(self):
- # list containers using end_marker param
- # first expect to get 0 container as we specified first container as
- # end_marker
- # second expect to get the top half of the containers
+ """Test listing containers with end_marker parameter
+
+        First expect to get 0 containers as we specified the first
+        container as the end_marker, second expect to get the top half of
+        the containers.
+ """
params = {'end_marker': self.containers[0]}
resp, container_list = \
self.account_client.list_account_containers(params=params)
@@ -190,7 +197,12 @@
@decorators.idempotent_id('ac8502c2-d4e4-4f68-85a6-40befea2ef5e')
def test_list_containers_with_marker_and_end_marker(self):
- # list containers combining marker and end_marker param
+ """Test listing containers with marker and end_marker parameter
+
+ If we use the first container as marker, and the last container as
+ end_marker, then we should get all containers excluding the first one
+ and the last one.
+ """
params = {'marker': self.containers[0],
'end_marker': self.containers[self.containers_count - 1]}
resp, container_list = self.account_client.list_account_containers(
@@ -200,8 +212,10 @@
@decorators.idempotent_id('f7064ae8-dbcc-48da-b594-82feef6ea5af')
def test_list_containers_with_limit_and_marker(self):
- # list containers combining marker and limit param
- # result are always limitated by the limit whatever the marker
+ """Test listing containers combining marker and limit parameter
+
+ Result are always limited by the limit whatever the marker.
+ """
for marker in random.choice(self.containers):
limit = random.randint(0, self.containers_count - 1)
params = {'marker': marker,
@@ -215,6 +229,10 @@
@decorators.idempotent_id('888a3f0e-7214-4806-8e50-5e0c9a69bb5e')
def test_list_containers_with_limit_and_end_marker(self):
+ """Test listing containers combining end_marker and limit parameter
+
+ Result are always limited by the limit whatever the end_marker.
+ """
# list containers combining limit and end_marker param
limit = random.randint(1, self.containers_count)
params = {'limit': limit,
@@ -227,7 +245,11 @@
@decorators.idempotent_id('8cf98d9c-e3a0-4e44-971b-c87656fdddbd')
def test_list_containers_with_limit_and_marker_and_end_marker(self):
- # list containers combining limit, marker and end_marker param
+ """Test listing containers combining marker and end_marker and limit
+
+ Result are always limited by the limit whatever the marker and the
+ end_marker.
+ """
limit = random.randint(1, self.containers_count)
params = {'limit': limit,
'marker': self.containers[0],
@@ -240,7 +262,7 @@
@decorators.idempotent_id('365e6fc7-1cfe-463b-a37c-8bd08d47b6aa')
def test_list_containers_with_prefix(self):
- # list containers that have a name that starts with a prefix
+ """Test listing containers that have a name starting with a prefix"""
prefix = 'tempest-a'
params = {'prefix': prefix}
resp, container_list = self.account_client.list_account_containers(
@@ -252,7 +274,7 @@
@decorators.idempotent_id('b1811cff-d1ed-4c15-a52e-efd8de41cf34')
def test_list_containers_reverse_order(self):
- # list containers in reverse order
+ """Test listing containers in reverse order"""
_, orig_container_list = self.account_client.list_account_containers()
params = {'reverse': True}
@@ -265,8 +287,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('4894c312-6056-4587-8d6f-86ffbf861f80')
def test_list_account_metadata(self):
- # list all account metadata
-
+ """Test listing account metadata"""
# set metadata to account
metadata = {'test-account-meta1': 'Meta1',
'test-account-meta2': 'Meta2'}
@@ -282,14 +303,14 @@
@decorators.idempotent_id('b904c2e3-24c2-4dba-ad7d-04e90a761be5')
def test_list_no_account_metadata(self):
- # list no account metadata
+ """Test listing account metadata for account without metadata"""
resp, _ = self.account_client.list_account_metadata()
self.assertHeaders(resp, 'Account', 'HEAD')
self.assertNotIn('x-account-meta-', str(resp))
@decorators.idempotent_id('e2a08b5f-3115-4768-a3ee-d4287acd6c08')
def test_update_account_metadata_with_create_metadata(self):
- # add metadata to account
+ """Test adding metadata to account"""
metadata = {'test-account-meta1': 'Meta1'}
resp, _ = self.account_client.create_update_or_delete_account_metadata(
create_update_metadata=metadata)
@@ -305,7 +326,7 @@
@decorators.idempotent_id('9f60348d-c46f-4465-ae06-d51dbd470953')
def test_update_account_metadata_with_delete_metadata(self):
- # delete metadata from account
+ """Test deleting metadata from account"""
metadata = {'test-account-meta1': 'Meta1'}
self.account_client.create_update_or_delete_account_metadata(
create_update_metadata=metadata)
@@ -318,8 +339,11 @@
@decorators.idempotent_id('64fd53f3-adbd-4639-af54-436e4982dbfb')
def test_update_account_metadata_with_create_metadata_key(self):
- # if the value of metadata is not set, the metadata is not
- # registered at a server
+ """Test adding metadata to account with empty value
+
+ When adding metadata with an empty value to the account, the metadata
+ is not registered.
+ """
metadata = {'test-account-meta1': ''}
resp, _ = self.account_client.create_update_or_delete_account_metadata(
create_update_metadata=metadata)
@@ -330,8 +354,11 @@
@decorators.idempotent_id('d4d884d3-4696-4b85-bc98-4f57c4dd2bf1')
def test_update_account_metadata_with_delete_metadata_key(self):
- # Although the value of metadata is not set, the feature of
- # deleting metadata is valid
+ """Test deleting metadata from account with empty value
+
+ Although the value of the metadata is not set, deleting metadata is
+ still valid, so the metadata is removed from the account.
+ """
metadata_1 = {'test-account-meta1': 'Meta1'}
self.account_client.create_update_or_delete_account_metadata(
create_update_metadata=metadata_1)
@@ -345,7 +372,11 @@
@decorators.idempotent_id('8e5fc073-59b9-42ee-984a-29ed11b2c749')
def test_update_account_metadata_with_create_and_delete_metadata(self):
- # Send a request adding and deleting metadata requests simultaneously
+ """Test adding and deleting metadata simultaneously
+
+ Send a single request that adds and deletes metadata simultaneously;
+ both the addition and the deletion will succeed.
+ """
metadata_1 = {'test-account-meta1': 'Meta1'}
self.account_client.create_update_or_delete_account_metadata(
create_update_metadata=metadata_1)
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index bdcb5ae..eb2ef7f 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -33,6 +33,8 @@
class ContainerSyncTest(base.BaseObjectTest):
+ """Test container synchronization"""
+
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@@ -90,7 +92,7 @@
# create object in container
object_name = data_utils.rand_name(name='TestSyncObject')
data = object_name[::-1].encode() # Raw data, we need bytes
- resp, _ = obj_client[0].create_object(cont[0], object_name, data)
+ obj_client[0].create_object(cont[0], object_name, data)
self.objects.append(object_name)
# wait until container contents list is not empty
@@ -129,6 +131,7 @@
not CONF.object_storage_feature_enabled.container_sync,
'Old-style container sync function is disabled')
def test_container_synchronization(self):
+ """Test container synchronization"""
def make_headers(cont, cont_client):
# tell first container to synchronize to a second
client_proxy_ip = \
diff --git a/tempest/api/object_storage/test_container_sync_middleware.py b/tempest/api/object_storage/test_container_sync_middleware.py
index e77b079..db6cfa4 100644
--- a/tempest/api/object_storage/test_container_sync_middleware.py
+++ b/tempest/api/object_storage/test_container_sync_middleware.py
@@ -27,6 +27,7 @@
class ContainerSyncMiddlewareTest(test_container_sync.ContainerSyncTest):
+ """Test containers synchronization specifying realm and cluster"""
@classmethod
def resource_setup(cls):
@@ -41,6 +42,7 @@
@decorators.idempotent_id('ea4645a1-d147-4976-82f7-e5a7a3065f80')
@utils.requires_ext(extension='container_sync', service='object')
def test_container_synchronization(self):
+ """Test container synchronization specifying realm and cluster"""
def make_headers(cont, cont_client):
# tell first container to synchronize to a second
account_name = cont_client.base_url.split('/')[-1]
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index 1567e06..365dc78 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -19,6 +19,7 @@
class CrossdomainTest(base.BaseObjectTest):
+ """Test crossdomain policy"""
@classmethod
def resource_setup(cls):
@@ -31,12 +32,10 @@
cls.xml_end = "</cross-domain-policy>"
- def setUp(self):
- super(CrossdomainTest, self).setUp()
-
@decorators.idempotent_id('d1b8b031-b622-4010-82f9-ff78a9e915c7')
@utils.requires_ext(extension='crossdomain', service='object')
def test_get_crossdomain_policy(self):
+ """Test getting crossdomain policy"""
url = self.account_client._get_base_version_url() + "crossdomain.xml"
resp, body = self.account_client.raw_request(url, "GET")
self.account_client._error_checker(resp, body)
diff --git a/tempest/api/object_storage/test_healthcheck.py b/tempest/api/object_storage/test_healthcheck.py
index f5e2443..d4a6a9f 100644
--- a/tempest/api/object_storage/test_healthcheck.py
+++ b/tempest/api/object_storage/test_healthcheck.py
@@ -21,9 +21,6 @@
class HealthcheckTest(base.BaseObjectTest):
"""Test healthcheck"""
- def setUp(self):
- super(HealthcheckTest, self).setUp()
-
@decorators.idempotent_id('db5723b1-f25c-49a9-bfeb-7b5640caf337')
def test_get_healthcheck(self):
"""Test getting healthcheck"""
diff --git a/tempest/api/object_storage/test_object_expiry.py b/tempest/api/object_storage/test_object_expiry.py
index 86f7c8c..6f6e32f 100644
--- a/tempest/api/object_storage/test_object_expiry.py
+++ b/tempest/api/object_storage/test_object_expiry.py
@@ -21,6 +21,8 @@
class ObjectExpiryTest(base.BaseObjectTest):
+ """Test object expiry"""
+
@classmethod
def resource_setup(cls):
super(ObjectExpiryTest, cls).resource_setup()
@@ -83,6 +85,7 @@
@decorators.idempotent_id('fb024a42-37f3-4ba5-9684-4f40a7910b41')
def test_get_object_after_expiry_time(self):
+ """Test object is expired after x-delete-after time"""
# the 10s is important, because the get calls can take 3s each
# some times
metadata = {'X-Delete-After': '10'}
@@ -90,5 +93,6 @@
@decorators.idempotent_id('e592f18d-679c-48fe-9e36-4be5f47102c5')
def test_get_object_at_expiry_time(self):
+ """Test object is expired at x-delete-at time"""
metadata = {'X-Delete-At': str(int(time.time()) + 10)}
self._test_object_expiry(metadata)
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index acb578d..fc9b1a2 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -29,6 +29,7 @@
class ObjectTest(base.BaseObjectTest):
+ """Test storage object"""
@classmethod
def resource_setup(cls):
@@ -78,6 +79,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('5b4ce26f-3545-46c9-a2ba-5754358a4c62')
def test_create_object(self):
+ """Test creating object and checking the object's uploaded content"""
# create object
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
@@ -97,7 +99,7 @@
@decorators.idempotent_id('5daebb1d-f0d5-4dc9-b541-69672eff00b0')
def test_create_object_with_content_disposition(self):
- # create object with content_disposition
+ """Test creating object with content-disposition"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {}
@@ -119,7 +121,7 @@
@decorators.idempotent_id('605f8317-f945-4bee-ae91-013f1da8f0a0')
def test_create_object_with_content_encoding(self):
- # create object with content_encoding
+ """Test creating object with content-encoding"""
object_name = data_utils.rand_name(name='TestObject')
# put compressed string
@@ -146,7 +148,7 @@
@decorators.idempotent_id('73820093-0503-40b1-a478-edf0e69c7d1f')
def test_create_object_with_etag(self):
- # create object with etag
+ """Test creating object with Etag"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
md5 = hashlib.md5(data).hexdigest()
@@ -165,8 +167,7 @@
@decorators.idempotent_id('84dafe57-9666-4f6d-84c8-0814d37923b8')
def test_create_object_with_expect_continue(self):
- # create object with expect_continue
-
+ """Test creating object with expect_continue"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
@@ -181,8 +182,9 @@
self.assertEqual(data, body)
@decorators.idempotent_id('4f84422a-e2f2-4403-b601-726a4220b54e')
+ @decorators.skip_because(bug='1905432')
def test_create_object_with_transfer_encoding(self):
- # create object with transfer_encoding
+ """Test creating object with transfer_encoding"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes(1024)
headers = {'Transfer-Encoding': 'chunked'}
@@ -202,7 +204,10 @@
@decorators.idempotent_id('0f3d62a6-47e3-4554-b0e5-1a5dc372d501')
def test_create_object_with_x_fresh_metadata(self):
- # create object with x_fresh_metadata
+ """Test creating object with x-fresh-metadata
+
+ The previously added metadata will be cleared.
+ """
object_name_base = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata_1 = {'X-Object-Meta-test-meta': 'Meta'}
@@ -228,7 +233,7 @@
@decorators.idempotent_id('1c7ed3e4-2099-406b-b843-5301d4811baf')
def test_create_object_with_x_object_meta(self):
- # create object with object_meta
+ """Test creating object with x-object-meta"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -247,7 +252,7 @@
@decorators.idempotent_id('e4183917-33db-4153-85cc-4dacbb938865')
def test_create_object_with_x_object_metakey(self):
- # create object with the blank value of metadata
+ """Test creating object with the blank value of metadata"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {'X-Object-Meta-test-meta': ''}
@@ -266,7 +271,10 @@
@decorators.idempotent_id('ce798afc-b278-45de-a5ce-2ea124b98b99')
def test_create_object_with_x_remove_object_meta(self):
- # create object with x_remove_object_meta
+ """Test creating object with x-remove-object-meta
+
+ The metadata will be removed from the object.
+ """
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
@@ -289,7 +297,11 @@
@decorators.idempotent_id('ad21e342-7916-4f9e-ab62-a1f885f2aaf9')
def test_create_object_with_x_remove_object_metakey(self):
- # create object with the blank value of remove metadata
+ """Test creating object with the blank value of remove metadata
+
+ Creating an object with the blank header 'X-Remove-Object-Meta-test-meta'
+ removes the metadata 'x-object-meta-test-meta' from the object.
+ """
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata_add = {'X-Object-Meta-test-meta': 'Meta'}
@@ -312,7 +324,7 @@
@decorators.idempotent_id('17738d45-03bd-4d45-9e0b-7b2f58f98687')
def test_delete_object(self):
- # create object
+ """Test deleting object"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
resp, _ = self.object_client.create_object(self.container_name,
@@ -325,7 +337,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('7a94c25d-66e6-434c-9c38-97d4e2c29945')
def test_update_object_metadata(self):
- # update object metadata
+ """Test updating object metadata"""
object_name, _ = self.create_object(self.container_name)
metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -343,7 +355,7 @@
@decorators.idempotent_id('48650ed0-c189-4e1e-ad6b-1d4770c6e134')
def test_update_object_metadata_with_remove_metadata(self):
- # update object metadata with remove metadata
+ """Test updating object metadata with remove metadata"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
create_metadata = {'X-Object-Meta-test-meta1': 'Meta1'}
@@ -366,6 +378,11 @@
@decorators.idempotent_id('f726174b-2ded-4708-bff7-729d12ce1f84')
def test_update_object_metadata_with_create_and_remove_metadata(self):
+ """Test updating object with creation and deletion of metadata
+
+ Update the object with creation and deletion of metadata in one
+ request; both operations will succeed.
+ """
# creation and deletion of metadata with one request
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
@@ -392,8 +409,7 @@
@decorators.idempotent_id('08854588-6449-4bb7-8cca-f2e1040f5e6f')
def test_update_object_metadata_with_x_object_manifest(self):
- # update object metadata with x_object_manifest
-
+ """Test updating object metadata with x_object_manifest"""
# uploading segments
object_name, _ = self._upload_segments()
# creating a manifest file
@@ -418,7 +434,7 @@
@decorators.idempotent_id('0dbbe89c-6811-4d84-a2df-eca2bdd40c0e')
def test_update_object_metadata_with_x_object_metakey(self):
- # update object metadata with a blank value of metadata
+ """Test updating object metadata with a blank value of metadata"""
object_name, _ = self.create_object(self.container_name)
update_metadata = {'X-Object-Meta-test-meta': ''}
@@ -436,7 +452,7 @@
@decorators.idempotent_id('9a88dca4-b684-425b-806f-306cd0e57e42')
def test_update_object_metadata_with_x_remove_object_metakey(self):
- # update object metadata with a blank value of remove metadata
+ """Test updating object metadata with blank remove metadata value"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
create_metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -460,7 +476,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('9a447cf6-de06-48de-8226-a8c6ed31caf2')
def test_list_object_metadata(self):
- # get object metadata
+ """Test listing object metadata"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -478,7 +494,7 @@
@decorators.idempotent_id('170fb90e-f5c3-4b1f-ae1b-a18810821172')
def test_list_no_object_metadata(self):
- # get empty list of object metadata
+ """Test listing object metadata for object without metadata"""
object_name, _ = self.create_object(self.container_name)
resp, _ = self.object_client.list_object_metadata(
@@ -489,8 +505,7 @@
@decorators.idempotent_id('23a3674c-d6de-46c3-86af-ff92bfc8a3da')
def test_list_object_metadata_with_x_object_manifest(self):
- # get object metadata with x_object_manifest
-
+ """Test getting object metadata with x_object_manifest"""
# uploading segments
object_name, _ = self._upload_segments()
# creating a manifest file
@@ -530,7 +545,7 @@
@decorators.attr(type='smoke')
@decorators.idempotent_id('02610ba7-86b7-4272-9ed8-aa8d417cb3cd')
def test_get_object(self):
- # retrieve object's data (in response body)
+ """Test retrieving object's data (in response body)"""
# create object
object_name, data = self.create_object(self.container_name)
@@ -543,7 +558,7 @@
@decorators.idempotent_id('005f9bf6-e06d-41ec-968e-96c78e0b1d82')
def test_get_object_with_metadata(self):
- # get object with metadata
+ """Test getting object with metadata"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
metadata = {'X-Object-Meta-test-meta': 'Meta'}
@@ -562,7 +577,7 @@
@decorators.idempotent_id('05a1890e-7db9-4a6c-90a8-ce998a2bddfa')
def test_get_object_with_range(self):
- # get object with range
+ """Test getting object with range"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes(100)
self.object_client.create_object(self.container_name,
@@ -580,7 +595,7 @@
@decorators.idempotent_id('11b4515b-7ba7-4ca8-8838-357ded86fc10')
def test_get_object_with_x_object_manifest(self):
- # get object with x_object_manifest
+ """Test getting object with x_object_manifest"""
# uploading segments
object_name, data_segments = self._upload_segments()
@@ -623,7 +638,7 @@
@decorators.idempotent_id('c05b4013-e4de-47af-be84-e598062b16fc')
def test_get_object_with_if_match(self):
- # get object with if_match
+ """Test getting object with if_match"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes(10)
create_md5 = hashlib.md5(data).hexdigest()
@@ -643,7 +658,7 @@
@decorators.idempotent_id('be133639-e5d2-4313-9b1f-2d59fc054a16')
def test_get_object_with_if_modified_since(self):
- # get object with if_modified_since
+ """Test getting object with if_modified_since"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
time_now = time.time()
@@ -663,7 +678,7 @@
@decorators.idempotent_id('641500d5-1612-4042-a04d-01fc4528bc30')
def test_get_object_with_if_none_match(self):
- # get object with if_none_match
+ """Test getting object with if_none_match"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes()
create_md5 = hashlib.md5(data).hexdigest()
@@ -685,7 +700,7 @@
@decorators.idempotent_id('0aa1201c-10aa-467a-bee7-63cbdd463152')
def test_get_object_with_if_unmodified_since(self):
- # get object with if_unmodified_since
+ """Test getting object with if_unmodified_since"""
object_name, data = self.create_object(self.container_name)
time_now = time.time()
@@ -700,7 +715,7 @@
@decorators.idempotent_id('94587078-475f-48f9-a40f-389c246e31cd')
def test_get_object_with_x_newest(self):
- # get object with x_newest
+ """Test getting object with x_newest"""
object_name, data = self.create_object(self.container_name)
list_metadata = {'X-Newest': 'true'}
@@ -713,6 +728,7 @@
@decorators.idempotent_id('1a9ab572-1b66-4981-8c21-416e2a5e6011')
def test_copy_object_in_same_container(self):
+ """Test copying object to another object in same container"""
# create source object
src_object_name = data_utils.rand_name(name='SrcObject')
src_data = data_utils.random_bytes(size=len(src_object_name) * 2)
@@ -742,7 +758,7 @@
@decorators.idempotent_id('2248abba-415d-410b-9c30-22dff9cd6e67')
def test_copy_object_to_itself(self):
- # change the content type of an existing object
+ """Test changing the content type of an existing object"""
# create object
object_name, _ = self.create_object(self.container_name)
@@ -755,11 +771,11 @@
headers = {}
headers['X-Copy-From'] = "%s/%s" % (str(self.container_name),
str(object_name))
- resp, body = self.object_client.create_object(self.container_name,
- object_name,
- data=None,
- metadata=metadata,
- headers=headers)
+ resp, _ = self.object_client.create_object(self.container_name,
+ object_name,
+ data=None,
+ metadata=metadata,
+ headers=headers)
self.assertHeaders(resp, 'Object', 'PUT')
# check the content type
@@ -769,6 +785,7 @@
@decorators.idempotent_id('06f90388-2d0e-40aa-934c-e9a8833e958a')
def test_copy_object_2d_way(self):
+ """Test copying object's data to the new object using COPY"""
# create source object
src_object_name = data_utils.rand_name(name='SrcObject')
src_data = data_utils.random_bytes(size=len(src_object_name) * 2)
@@ -793,6 +810,7 @@
@decorators.idempotent_id('aa467252-44f3-472a-b5ae-5b57c3c9c147')
def test_copy_object_across_containers(self):
+ """Test copying object to another container"""
# create a container to use as a source container
src_container_name = data_utils.rand_name(name='TestSourceContainer')
self.container_client.update_container(src_container_name)
@@ -837,6 +855,7 @@
@decorators.idempotent_id('5a9e2cc6-85b6-46fc-916d-0cbb7a88e5fd')
def test_copy_object_with_x_fresh_metadata(self):
+ """Test copying objectwith x_fresh_metadata"""
# create source object
metadata = {'x-object-meta-src': 'src_value'}
src_object_name, data = self.create_object(self.container_name,
@@ -858,6 +877,7 @@
@decorators.idempotent_id('a28a8b99-e701-4d7e-9d84-3b66f121460b')
def test_copy_object_with_x_object_metakey(self):
+ """Test copying object with x_object_metakey"""
# create source object
metadata = {'x-object-meta-src': 'src_value'}
src_obj_name, data = self.create_object(self.container_name,
@@ -881,6 +901,7 @@
@decorators.idempotent_id('edabedca-24c3-4322-9b70-d6d9f942a074')
def test_copy_object_with_x_object_meta(self):
+ """Test copying object with x_object_meta"""
# create source object
metadata = {'x-object-meta-src': 'src_value'}
src_obj_name, data = self.create_object(self.container_name,
@@ -904,6 +925,7 @@
@decorators.idempotent_id('e3e6a64a-9f50-4955-b987-6ce6767c97fb')
def test_object_upload_in_segments(self):
+ """Test uploading object in segments"""
# create object
object_name = data_utils.rand_name(name='LObject')
data = data_utils.arbitrary_string()
@@ -947,10 +969,13 @@
@decorators.idempotent_id('50d01f12-526f-4360-9ac2-75dd508d7b68')
def test_get_object_if_different(self):
- # http://en.wikipedia.org/wiki/HTTP_ETag
- # Make a conditional request for an object using the If-None-Match
- # header, it should get downloaded only if the local file is different,
- # otherwise the response code should be 304 Not Modified
+ """Test getting object content only when the local file is different
+
+ http://en.wikipedia.org/wiki/HTTP_ETag
+ Make a conditional request for an object using the If-None-Match
+ header; it should get downloaded only if the local file is different,
+ otherwise the response code should be 304 Not Modified.
+ """
object_name, data = self.create_object(self.container_name)
# local copy is identical, no download
md5 = hashlib.md5(data).hexdigest()
@@ -975,6 +1000,7 @@
class PublicObjectTest(base.BaseObjectTest):
+ """Test public storage object"""
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@@ -1000,9 +1026,11 @@
@decorators.idempotent_id('07c9cf95-c0d4-4b49-b9c8-0ef2c9b27193')
def test_access_public_container_object_without_using_creds(self):
- # make container public-readable and access an object in it object
- # anonymously, without using credentials
+ """Test accessing public container object without using credentials
+ Make container public-readable and access an object in it object
+ anonymously, without using credentials.
+ """
# update container metadata to make it publicly readable
cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
resp_meta, body = (
@@ -1040,8 +1068,11 @@
@decorators.idempotent_id('54e2a2fe-42dc-491b-8270-8e4217dd4cdc')
def test_access_public_object_with_another_user_creds(self):
- # make container public-readable and access an object in it using
- # another user's credentials
+ """Test accessing public object with another user's credentials
+
+ Make container public-readable and access an object in it using
+ another user's credentials.
+ """
cont_headers = {'X-Container-Read': '.r:*,.rlistings'}
resp_meta, body = (
self.container_client.create_update_or_delete_container_metadata(
diff --git a/tempest/api/object_storage/test_object_slo.py b/tempest/api/object_storage/test_object_slo.py
index 8bb2e6e..664bbc8 100644
--- a/tempest/api/object_storage/test_object_slo.py
+++ b/tempest/api/object_storage/test_object_slo.py
@@ -27,6 +27,7 @@
class ObjectSloTest(base.BaseObjectTest):
+ """Test static large object"""
def setUp(self):
super(ObjectSloTest, self).setUp()
@@ -108,7 +109,7 @@
@decorators.idempotent_id('2c3f24a6-36e8-4711-9aa2-800ee1fc7b5b')
@utils.requires_ext(extension='slo', service='object')
def test_upload_manifest(self):
- # create static large object from multipart manifest
+ """Test creating static large object from multipart manifest"""
manifest = self._create_manifest()
params = {'multipart-manifest': 'put'}
@@ -123,7 +124,10 @@
@decorators.idempotent_id('e69ad766-e1aa-44a2-bdd2-bf62c09c1456')
@utils.requires_ext(extension='slo', service='object')
def test_list_large_object_metadata(self):
- # list static large object metadata using multipart manifest
+ """Test listing static large object metadata
+
+ List static large object metadata using multipart manifest
+ """
object_name = self._create_large_object()
resp, _ = self.object_client.list_object_metadata(
@@ -135,7 +139,7 @@
@decorators.idempotent_id('49bc49bc-dd1b-4c0f-904e-d9f10b830ee8')
@utils.requires_ext(extension='slo', service='object')
def test_retrieve_large_object(self):
- # list static large object using multipart manifest
+ """Test listing static large object using multipart manifest"""
object_name = self._create_large_object()
resp, body = self.object_client.get_object(
@@ -150,7 +154,7 @@
@decorators.idempotent_id('87b6dfa1-abe9-404d-8bf0-6c3751e6aa77')
@utils.requires_ext(extension='slo', service='object')
def test_delete_large_object(self):
- # delete static large object using multipart manifest
+ """Test deleting static large object using multipart manifest"""
object_name = self._create_large_object()
params_del = {'multipart-manifest': 'delete'}
@@ -161,6 +165,6 @@
self.assertHeaders(resp, 'Object', 'DELETE')
- resp, body = self.container_client.list_container_objects(
+ resp, _ = self.container_client.list_container_objects(
self.container_name)
self.assertEqual(int(resp['x-container-object-count']), 0)
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 76c22f0..389d3be 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -127,14 +127,14 @@
def test_update_volume_with_nonexistent_volume_id(self):
"""Test updating non existent volume should fail"""
self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
- volume_id=data_utils.rand_uuid())
+ volume_id=data_utils.rand_uuid(), name="n")
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e66e40d6-65e6-4e75-bdc7-636792fa152d')
def test_update_volume_with_invalid_volume_id(self):
"""Test updating volume with invalid volume id should fail"""
self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
- volume_id=data_utils.rand_name('invalid'))
+ volume_id=data_utils.rand_name('invalid'), name="n")
@decorators.attr(type=['negative'])
@decorators.idempotent_id('72aeca85-57a5-4c1f-9057-f320f9ea575b')
diff --git a/tempest/clients.py b/tempest/clients.py
index 1db93a0..6d19a0c 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -44,7 +44,7 @@
self._set_object_storage_clients()
self._set_image_clients()
self._set_network_clients()
- self.placement_client = self.placement.PlacementClient()
+ self._set_placement_clients()
# TODO(andreaf) This is maintained for backward compatibility
# with plugins, but it should removed eventually, since it was
# never a stable interface and it's not useful anyways
@@ -139,6 +139,11 @@
self.snapshots_extensions_client = self.compute.SnapshotsClient(
**params_volume)
+ def _set_placement_clients(self):
+ self.placement_client = self.placement.PlacementClient()
+ self.resource_providers_client = \
+ self.placement.ResourceProvidersClient()
+
def _set_identity_clients(self):
# Clients below use the admin endpoint type of Keystone API v2
params_v2_admin = {
@@ -203,6 +208,8 @@
**params_v3)
self.application_credentials_client = \
self.identity_v3.ApplicationCredentialsClient(**params_v3)
+ self.access_rules_client = \
+ self.identity_v3.AccessRulesClient(**params_v3)
# Token clients do not use the catalog. They only need default_params.
# They read auth_url, so they should only be set if the corresponding
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index ff552a1..917262e 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -270,7 +270,7 @@
config.CONF.set_config_path(parsed_args.config_file)
setup_logging()
resources = []
- for count in range(parsed_args.concurrency):
+ for _ in range(parsed_args.concurrency):
# Use N different cred_providers to obtain different
# sets of creds
cred_provider = get_credential_provider(parsed_args)
diff --git a/tempest/cmd/run.py b/tempest/cmd/run.py
index d82b6df..8bebce2 100644
--- a/tempest/cmd/run.py
+++ b/tempest/cmd/run.py
@@ -129,7 +129,6 @@
from cliff import command
from oslo_serialization import jsonutils as json
-import six
from stestr import commands
from tempest import clients
@@ -139,10 +138,6 @@
from tempest.common import credentials_factory as credentials
from tempest import config
-if six.PY2:
- # Python 2 has not FileNotFoundError exception
- FileNotFoundError = IOError
-
CONF = config.CONF
SAVED_STATE_JSON = "saved_state.json"
@@ -167,7 +162,7 @@
# environment variable and fall back to "python", under python3
# if it does not exist. we should set it to the python3 executable
# to deal with this situation better for now.
- if six.PY3 and 'PYTHON' not in os.environ:
+ if 'PYTHON' not in os.environ:
os.environ['PYTHON'] = sys.executable
def _create_stestr_conf(self):
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index edb9d16..42f68f1 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -19,7 +19,6 @@
import struct
import textwrap
-import six
from six.moves.urllib import parse as urlparse
from oslo_log import log as logging
@@ -31,11 +30,6 @@
from tempest.lib.common import rest_client
from tempest.lib.common.utils import data_utils
-if six.PY2:
- ord_func = ord
-else:
- ord_func = int
-
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -64,7 +58,7 @@
def create_test_server(clients, validatable=False, validation_resources=None,
tenant_network=None, wait_until=None,
volume_backed=False, name=None, flavor=None,
- image_id=None, **kwargs):
+ image_id=None, wait_for_sshable=True, **kwargs):
"""Common wrapper utility returning a test server.
This method is a common wrapper returning a test server that can be
@@ -100,6 +94,8 @@
CONF.compute.flavor_ref will be used instead.
:param image_id: ID of the image to be used to provision the server. If not
defined, CONF.compute.image_ref will be used instead.
+ :param wait_for_sshable: Check the server's console log and wait until
+ it is ready to log in.
:returns: a tuple
"""
@@ -270,6 +266,10 @@
LOG.exception('Server %s failed to delete in time',
server['id'])
+ if (validatable and CONF.compute_feature_enabled.console_output and
+ wait_for_sshable):
+ waiters.wait_for_guest_os_boot(clients.servers_client, server['id'])
+
return body, servers
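
A minimal usage sketch for the new keyword (not part of this change): `clients` and `validation_resources` stand in for whatever the calling test already provides.

from tempest.common import compute

# Build a validatable server but skip the console-log "login:" wait wired in
# above, e.g. for images whose console output cannot be parsed.
server, servers = compute.create_test_server(
    clients,
    validatable=True,
    validation_resources=validation_resources,
    wait_until='ACTIVE',
    wait_for_sshable=False)
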
@@ -365,8 +365,8 @@
# frames less than 125 bytes here (for the negotiation) and
# that only the 2nd byte contains the length, and since the
# server doesn't do masking, we can just read the data length
- if ord_func(header[1]) & 127 > 0:
- return self._recv(ord_func(header[1]) & 127)
+ if int(header[1]) & 127 > 0:
+ return self._recv(int(header[1]) & 127)
def send_frame(self, data):
"""Wrapper for sending data to add in the WebSocket frame format."""
@@ -383,7 +383,7 @@
frame_bytes.append(mask[i])
# Mask each of the actual data bytes that we are going to send
for i in range(len(data)):
- frame_bytes.append(ord_func(data[i]) ^ mask[i % 4])
+ frame_bytes.append(int(data[i]) ^ mask[i % 4])
# Convert our integer list to a binary array of bytes
frame_bytes = struct.pack('!%iB' % len(frame_bytes), * frame_bytes)
self._socket.sendall(frame_bytes)
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 167bf5b..914acf7 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -128,3 +128,18 @@
if extension_name in config_dict[service]:
return True
return False
+
+
+def is_network_feature_enabled(feature_name):
+ """A function that will check the list of available network features
+
+ """
+ list_of_features = CONF.network_feature_enabled.available_features
+
+ if not list_of_features:
+ return False
+ if list_of_features[0] == 'all':
+ return True
+ if feature_name in list_of_features:
+ return True
+ return False
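
A short, hypothetical guard built on the helper above; the feature name is only an example of an entry in the new [network-feature-enabled]/available_features option introduced later in this change.

from tempest.common import utils


def require_network_feature(test_case, feature_name):
    # Skip the given test unless the feature is listed in available_features
    # (or the special entry 'all' is configured).
    if not utils.is_network_feature_enabled(feature_name):
        test_case.skipTest('%s is not an available network feature'
                           % feature_name)
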
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index fc25914..625e08e 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -124,12 +124,18 @@
raise lib_exc.DeleteErrorException(
"Server %s failed to delete and is in ERROR status" %
server_id)
+
if server_status == 'SOFT_DELETED':
# Soft-deleted instances need to be forcibly deleted to
# prevent some test cases from failing.
LOG.debug("Automatically force-deleting soft-deleted server %s",
server_id)
- client.force_delete_server(server_id)
+ try:
+ client.force_delete_server(server_id)
+ except lib_exc.NotFound:
+ # The instance may have been deleted so ignore
+ # NotFound exception
+ return
if int(time.time()) - start_time >= client.build_timeout:
raise lib_exc.TimeoutException
@@ -187,6 +193,59 @@
raise lib_exc.TimeoutException(message)
+def wait_for_image_imported_to_stores(client, image_id, stores):
+ """Waits for an image to be imported to all requested stores.
+
+ The client should also have build_interval and build_timeout attributes.
+ """
+
+ start = int(time.time())
+ while int(time.time()) - start < client.build_timeout:
+ image = client.show_image(image_id)
+ if image['status'] == 'active' and image['stores'] == stores:
+ return
+
+ time.sleep(client.build_interval)
+
+ message = ('Image %s failed to import on stores: %s' %
+ (image_id, str(image['os_glance_failed_import'])))
+ caller = test_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ raise lib_exc.TimeoutException(message)
+
+
+def wait_for_image_copied_to_stores(client, image_id):
+ """Waits for an image to be copied on all requested stores.
+
+ The client should also have build_interval and build_timeout attributes.
+ This returns the list of stores where the copy failed.
+ """
+
+ start = int(time.time())
+ store_left = []
+ while int(time.time()) - start < client.build_timeout:
+ image = client.show_image(image_id)
+ store_left = image.get('os_glance_importing_to_stores')
+ # NOTE(danms): If os_glance_importing_to_stores is None, then
+ # we've raced with the startup of the task and should continue
+ # to wait.
+ if store_left is not None and not store_left:
+ return image['os_glance_failed_import']
+ if image['status'].lower() == 'killed':
+ raise exceptions.ImageKilledException(image_id=image_id,
+ status=image['status'])
+
+ time.sleep(client.build_interval)
+
+ message = ('Image %s failed to finish the copy operation '
+ 'on stores: %s' % (image_id, str(store_left)))
+ caller = test_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ raise lib_exc.TimeoutException(message)
+
+
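
A hedged sketch of driving the two new waiters; the client variable, image id and store names are illustrative, and the image import itself is assumed to have been started elsewhere.

from tempest.common import waiters

# Wait until an interoperable import has landed the image in both stores;
# 'fast,cheap' mirrors the comma-separated 'stores' field Glance reports.
waiters.wait_for_image_imported_to_stores(images_client, image_id,
                                          'fast,cheap')

# For copy-image imports, the waiter returns the stores the copy failed on.
failed = waiters.wait_for_image_copied_to_stores(images_client, image_id)
assert not failed, 'copy failed on stores: %s' % failed
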
def wait_for_volume_resource_status(client, resource_id, status):
"""Waits for a volume resource to reach a given status.
@@ -378,3 +437,20 @@
'the required time (%s s)' % (port_id, server_id,
client.build_timeout))
raise lib_exc.TimeoutException(message)
+
+
+def wait_for_guest_os_boot(client, server_id):
+ start_time = int(time.time())
+ while True:
+ console_output = client.get_console_output(server_id)['output']
+ for line in console_output.split('\n'):
+ if 'login:' in line.lower():
+ return
+ if int(time.time()) - start_time >= client.build_timeout:
+ LOG.info("Guest OS on server %s probably isn't ready or its "
+ "console log can't be parsed properly. If guest OS "
+ "isn't ready, that may cause problems with SSH to "
+ "the server.",
+ server_id)
+ return
+ time.sleep(client.build_interval)
diff --git a/tempest/config.py b/tempest/config.py
index 77b5ce2..3761d8e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -250,6 +250,11 @@
default=False,
help='Does the environment have application credentials '
'enabled?'),
+ # Access rules for application credentials is a default feature in Train.
+ # This config option can be removed once Stein is EOL.
+ cfg.BoolOpt('access_rules',
+ default=False,
+ help='Does the environment have access rules enabled?'),
cfg.BoolOpt('immutable_user_source',
default=False,
help='Set to True if the environment has a read-only '
@@ -789,6 +794,13 @@
"entry all which indicates every extension is enabled. "
"Empty list indicates all extensions are disabled. "
"To get the list of extensions run: 'neutron ext-list'"),
+ cfg.ListOpt('available_features',
+ default=['all'],
+ help="A list of available network features with a special "
+ "entry all that indicates every feature is available. "
+ "Empty list indicates all features are disabled."
+ "This list can contain features that are not "
+ "discoverable through API."),
cfg.BoolOpt('ipv6_subnet_attributes',
default=False,
help="Allow the execution of IPv6 subnet tests that use "
@@ -857,10 +869,17 @@
cfg.StrOpt('image_ssh_user',
default="root",
help="User name used to authenticate to an instance."),
+ cfg.StrOpt('image_alt_ssh_user',
+ default="root",
+ help="User name used to authenticate to an alt instance."),
cfg.StrOpt('image_ssh_password',
default="password",
help="Password used to authenticate to an instance.",
secret=True),
+ cfg.StrOpt('image_alt_ssh_password',
+ default="password",
+ help="Password used to authenticate to an alt instance.",
+ secret=True),
cfg.StrOpt('ssh_shell_prologue',
default="set -eu -o pipefail; PATH=$$PATH:/sbin:/usr/sbin;",
help="Shell fragments to use before executing a command "
@@ -1198,7 +1217,7 @@
The best use case is investigating used resources of one test.
A test can be run as follows:
- $ ostestr --pdb TEST_ID
+ $ stestr run --pdb TEST_ID
or
$ python -m testtools.run TEST_ID"""),
]
diff --git a/tempest/lib/api_schema/response/compute/v2_71/servers.py b/tempest/lib/api_schema/response/compute/v2_71/servers.py
index 5cf0f8a..f4c01ee 100644
--- a/tempest/lib/api_schema/response/compute/v2_71/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_71/servers.py
@@ -79,3 +79,6 @@
check_tag_existence = copy.deepcopy(servers270.check_tag_existence)
update_tag = copy.deepcopy(servers270.update_tag)
delete_tag = copy.deepcopy(servers270.delete_tag)
+attach_volume = copy.deepcopy(servers270.attach_volume)
+show_volume_attachment = copy.deepcopy(servers270.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers270.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/compute/v2_73/servers.py b/tempest/lib/api_schema/response/compute/v2_73/servers.py
index 6e491e9..ae7ebc4 100644
--- a/tempest/lib/api_schema/response/compute/v2_73/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_73/servers.py
@@ -76,3 +76,6 @@
check_tag_existence = copy.deepcopy(servers271.check_tag_existence)
update_tag = copy.deepcopy(servers271.update_tag)
delete_tag = copy.deepcopy(servers271.delete_tag)
+attach_volume = copy.deepcopy(servers271.attach_volume)
+show_volume_attachment = copy.deepcopy(servers271.show_volume_attachment)
+list_volume_attachments = copy.deepcopy(servers271.list_volume_attachments)
diff --git a/tempest/lib/api_schema/response/volume/groups.py b/tempest/lib/api_schema/response/volume/groups.py
index cb31269..f6e4bc2 100644
--- a/tempest/lib/api_schema/response/volume/groups.py
+++ b/tempest/lib/api_schema/response/volume/groups.py
@@ -64,7 +64,10 @@
'type': 'array',
'items': {'type': 'string', 'format': 'uuid'}
},
- 'replication_status': {'type': 'string'}
+ # TODO(zhufl): replication_status is added in 3.38, we
+ # should move it to the 3.38 schema file when microversion
+ # is supported in volume interfaces
+ 'replication_status': {'type': ['string', 'null']}
},
'additionalProperties': False,
'required': ['status', 'description', 'created_at',
@@ -129,6 +132,10 @@
'type': 'array',
'items': {'type': 'string', 'format': 'uuid'}
},
+ # TODO(zhufl): replication_status is added in 3.38, we
+ # should move it to the 3.38 schema file when
+ # microversion is supported in volume interfaces
+ 'replication_status': {'type': ['string', 'null']}
},
'additionalProperties': False,
'required': ['status', 'description', 'created_at',
diff --git a/tempest/lib/auth.py b/tempest/lib/auth.py
index 3fee489..7c279ab 100644
--- a/tempest/lib/auth.py
+++ b/tempest/lib/auth.py
@@ -391,7 +391,7 @@
"""
if auth_data is None:
auth_data = self.get_auth()
- token, _auth_data = auth_data
+ _, _auth_data = auth_data
service = filters.get('service')
region = filters.get('region')
name = filters.get('name')
@@ -524,7 +524,7 @@
"""
if auth_data is None:
auth_data = self.get_auth()
- token, _auth_data = auth_data
+ _, _auth_data = auth_data
service = filters.get('service')
region = filters.get('region')
name = filters.get('name')
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index d8c776b..c661d21 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -18,7 +18,6 @@
import subprocess
from oslo_log import log as logging
-import six
from tempest.lib import base
import tempest.lib.cli.output_parser
@@ -55,8 +54,6 @@
flags, action, params])
cmd = cmd.strip()
LOG.info("running: '%s'", cmd)
- if six.PY2:
- cmd = cmd.encode('utf-8')
cmd = shlex.split(cmd)
stdout = subprocess.PIPE
stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
@@ -67,10 +64,7 @@
cmd,
result,
result_err)
- if six.PY2:
- return result
- else:
- return os.fsdecode(result)
+ return os.fsdecode(result)
class CLIClient(object):
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 71ecb32..ff09671 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -16,6 +16,7 @@
import argparse
import ast
+import contextlib
import importlib
import inspect
import os
@@ -28,7 +29,7 @@
DECORATOR_MODULE = 'decorators'
DECORATOR_NAME = 'idempotent_id'
-DECORATOR_IMPORT = 'tempest.%s' % DECORATOR_MODULE
+DECORATOR_IMPORT = 'tempest.lib.%s' % DECORATOR_MODULE
IMPORT_LINE = 'from tempest.lib import %s' % DECORATOR_MODULE
DECORATOR_TEMPLATE = "@%s.%s('%%s')" % (DECORATOR_MODULE,
DECORATOR_NAME)
@@ -180,34 +181,124 @@
elif isinstance(node, ast.ImportFrom):
return '%s.%s' % (node.module, node.names[0].name)
+ @contextlib.contextmanager
+ def ignore_site_packages_paths(self):
+ """Removes site-packages directories from the sys.path
+
+ Source:
+ - StackOverflow: https://stackoverflow.com/questions/22195382/
+ - Author: https://stackoverflow.com/users/485844/
+ """
+
+ paths = sys.path
+ # remove all third-party paths
+ # so that only stdlib imports will succeed
+ sys.path = list(filter(
+ None,
+ filter(lambda i: 'site-packages' not in i, sys.path)
+ ))
+ yield
+ sys.path = paths
+
+ def is_std_lib(self, module):
+ """Checks whether the module is part of the stdlib or not
+
+ Source:
+ - StackOverflow: https://stackoverflow.com/questions/22195382/
+ - Author: https://stackoverflow.com/users/485844/
+ """
+
+ if module in sys.builtin_module_names:
+ return True
+
+ with self.ignore_site_packages_paths():
+ imported_module = sys.modules.pop(module, None)
+ try:
+ importlib.import_module(module)
+ except ImportError:
+ return False
+ else:
+ return True
+ finally:
+ if imported_module:
+ sys.modules[module] = imported_module
+
def _add_import_for_test_uuid(self, patcher, src_parsed, source_path):
- with open(source_path) as f:
- src_lines = f.read().split('\n')
- line_no = 0
- tempest_imports = [node for node in src_parsed.body
+ import_list = [node for node in src_parsed.body
+ if isinstance(node, (ast.Import, ast.ImportFrom))]
+
+ if not import_list:
+ print("(WARNING) %s: The file is not valid as it does not contain "
+ "any import line! Therefore the import needed by "
+ "@decorators.idempotent_id is not added!" % source_path)
+ return
+
+ tempest_imports = [node for node in import_list
if self._import_name(node) and
'tempest.' in self._import_name(node)]
- if not tempest_imports:
- import_snippet = '\n'.join(('', IMPORT_LINE, ''))
- else:
- for node in tempest_imports:
- if self._import_name(node) < DECORATOR_IMPORT:
- continue
- else:
- line_no = node.lineno
- import_snippet = IMPORT_LINE
- break
+
+ for node in tempest_imports:
+ if self._import_name(node) < DECORATOR_IMPORT:
+ continue
else:
- line_no = tempest_imports[-1].lineno
- while True:
- if (not src_lines[line_no - 1] or
- getattr(self._next_node(src_parsed.body,
- tempest_imports[-1]),
- 'lineno') == line_no or
- line_no == len(src_lines)):
- break
- line_no += 1
- import_snippet = '\n'.join((IMPORT_LINE, ''))
+ line_no = node.lineno
+ break
+ else:
+ if tempest_imports:
+ line_no = tempest_imports[-1].lineno + 1
+
+ # Insert import line between existing tempest imports
+ if tempest_imports:
+ patcher.add_patch(source_path, IMPORT_LINE, line_no)
+ return
+
+ # Group space separated imports together
+ grouped_imports = {}
+ first_import_line = import_list[0].lineno
+ for idx, import_line in enumerate(import_list, first_import_line):
+ group_no = import_line.lineno - idx
+ group = grouped_imports.get(group_no, [])
+ group.append(import_line)
+ grouped_imports[group_no] = group
+
+ if len(grouped_imports) > 3:
+ print("(WARNING) %s: The file contains more than three import "
+ "groups! This is not valid according to the PEP8 "
+ "style guide. " % source_path)
+
+ # Divide grouped_imports into groups based on the PEP8 style guide
+ pep8_groups = {}
+ package_name = self.package.__name__.split(".")[0]
+ for key in grouped_imports:
+ module = self._import_name(grouped_imports[key][0]).split(".")[0]
+ if module.startswith(package_name):
+ group = pep8_groups.get('3rd_group', [])
+ pep8_groups['3rd_group'] = group + grouped_imports[key]
+ elif self.is_std_lib(module):
+ group = pep8_groups.get('1st_group', [])
+ pep8_groups['1st_group'] = group + grouped_imports[key]
+ else:
+ group = pep8_groups.get('2nd_group', [])
+ pep8_groups['2nd_group'] = group + grouped_imports[key]
+
+ for node in pep8_groups.get('2nd_group', []):
+ if self._import_name(node) < DECORATOR_IMPORT:
+ continue
+ else:
+ line_no = node.lineno
+ import_snippet = IMPORT_LINE
+ break
+ else:
+ if pep8_groups.get('2nd_group', []):
+ line_no = pep8_groups['2nd_group'][-1].lineno + 1
+ import_snippet = IMPORT_LINE
+ elif pep8_groups.get('1st_group', []):
+ line_no = pep8_groups['1st_group'][-1].lineno + 1
+ import_snippet = '\n' + IMPORT_LINE
+ else:
+ line_no = pep8_groups['3rd_group'][0].lineno
+ import_snippet = IMPORT_LINE + '\n\n'
+
patcher.add_patch(source_path, import_snippet, line_no)
def get_tests(self):
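
The grouping step in the new `_add_import_for_test_uuid` relies on a small arithmetic trick; a standalone sketch of the same idea, independent of check_uuid:

# Contiguous import lines share the same (lineno - enumeration index), so
# that difference keys each blank-line separated import group.
import_linenos = [1, 2, 3, 5, 6, 9]  # hypothetical linenos of import nodes
groups = {}
for idx, lineno in enumerate(import_linenos, import_linenos[0]):
    groups.setdefault(lineno - idx, []).append(lineno)
print(groups)  # {0: [1, 2, 3], 1: [5, 6], 3: [9]}
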
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 0513e90..b47b511 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -507,7 +507,7 @@
if not hasattr(body, "keys") or len(body.keys()) != 1:
return body
# Just return the "wrapped" element
- first_key, first_item = six.next(six.iteritems(body))
+ _, first_item = six.next(six.iteritems(body))
if isinstance(first_item, (dict, list)):
return first_item
except (ValueError, IndexError):
diff --git a/tempest/lib/common/thread.py b/tempest/lib/common/thread.py
index b47d40d..ef0ec73 100644
--- a/tempest/lib/common/thread.py
+++ b/tempest/lib/common/thread.py
@@ -13,13 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
-
-if six.PY2:
- # module thread is removed in Python 3
- from thread import get_ident # noqa: H237,F401
-
-else:
- # On Python3 thread module has been deprecated and get_ident has been moved
- # to threading module
- from threading import get_ident # noqa: F401
+# On Python3 thread module has been deprecated and get_ident has been moved
+# to threading module
+from threading import get_ident # noqa: F401
diff --git a/tempest/lib/common/utils/data_utils.py b/tempest/lib/common/utils/data_utils.py
index 7f94612..44b55eb 100644
--- a/tempest/lib/common/utils/data_utils.py
+++ b/tempest/lib/common/utils/data_utils.py
@@ -129,7 +129,7 @@
:rtype: string
"""
guid = []
- for i in range(8):
+ for _ in range(8):
guid.append("%02x" % random.randint(0x00, 0xff))
return ':'.join(guid)
diff --git a/tempest/lib/common/utils/test_utils.py b/tempest/lib/common/utils/test_utils.py
index 2a9f3a9..4cf8351 100644
--- a/tempest/lib/common/utils/test_utils.py
+++ b/tempest/lib/common/utils/test_utils.py
@@ -80,10 +80,19 @@
def call_and_ignore_notfound_exc(func, *args, **kwargs):
"""Call the given function and pass if a `NotFound` exception is raised."""
- try:
- return func(*args, **kwargs)
- except exceptions.NotFound:
- pass
+ attempt = 0
+ while True:
+ attempt += 1
+ try:
+ return func(*args, **kwargs)
+ except exceptions.NotFound:
+ return
+ except exceptions.ServerFault:
+ # NOTE(danms): Tolerate three ServerFault exceptions while trying
+ # to do this thing, and after that, assume it's legit.
+ if attempt >= 3:
+ raise
+ LOG.warning('Got ServerFault while running %s, retrying...', func)
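
A usage sketch of the retrying helper, as it would be registered from inside a test class; judging from the code above, NotFound still returns None, the first two ServerFault responses are logged and retried, and a third one propagates. The volume call is illustrative.

from tempest.lib.common.utils import test_utils

# Typical cleanup registration; deletion racing with an already-deleted
# volume is ignored, transient server faults are retried a couple of times.
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                self.volumes_client.delete_volume, volume['id'])
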
def call_until_true(func, duration, sleep_for, *args, **kwargs):
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 6723516..e82b58f 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -646,7 +646,7 @@
For a full list of available parameters, please refer to the official
API reference:
- https://docs.openstack.org/api-ref/compute/#create-remote-console
+ https://docs.openstack.org/api-ref/compute/#create-console
"""
param = {
'remote_console': {
diff --git a/tempest/lib/services/identity/v3/__init__.py b/tempest/lib/services/identity/v3/__init__.py
index da1c51c..86fa991 100644
--- a/tempest/lib/services/identity/v3/__init__.py
+++ b/tempest/lib/services/identity/v3/__init__.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations under
# the License.
+from tempest.lib.services.identity.v3.access_rules_client import \
+ AccessRulesClient
from tempest.lib.services.identity.v3.application_credentials_client import \
ApplicationCredentialsClient
from tempest.lib.services.identity.v3.catalog_client import \
@@ -48,9 +50,10 @@
from tempest.lib.services.identity.v3.users_client import UsersClient
from tempest.lib.services.identity.v3.versions_client import VersionsClient
-__all__ = ['ApplicationCredentialsClient', 'CatalogClient',
- 'CredentialsClient', 'DomainsClient', 'DomainConfigurationClient',
- 'EndPointGroupsClient', 'EndPointsClient', 'EndPointsFilterClient',
+__all__ = ['AccessRulesClient', 'ApplicationCredentialsClient',
+ 'CatalogClient', 'CredentialsClient', 'DomainsClient',
+ 'DomainConfigurationClient', 'EndPointGroupsClient',
+ 'EndPointsClient', 'EndPointsFilterClient',
'GroupsClient', 'IdentityClient', 'InheritedRolesClient',
'OAUTHConsumerClient', 'OAUTHTokenClient', 'PoliciesClient',
'ProjectsClient', 'ProjectTagsClient', 'RegionsClient',
diff --git a/tempest/lib/services/identity/v3/access_rules_client.py b/tempest/lib/services/identity/v3/access_rules_client.py
new file mode 100644
index 0000000..4f13e47
--- /dev/null
+++ b/tempest/lib/services/identity/v3/access_rules_client.py
@@ -0,0 +1,68 @@
+# Copyright 2019 SUSE LLC
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+https://docs.openstack.org/api-ref/identity/v3/index.html#application-credentials
+"""
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+
+
+class AccessRulesClient(rest_client.RestClient):
+ api_version = "v3"
+
+ def show_access_rule(self, user_id, access_rule_id):
+ """Gets details of an access rule.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3/index.html#show-access-rule-details
+ """
+ resp, body = self.get('users/%s/access_rules/%s' %
+ (user_id, access_rule_id))
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_access_rules(self, user_id, **params):
+ """Lists out all of a user's access rules.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3/index.html#list-access-rules
+ """
+ url = 'users/%s/access_rules' % user_id
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_access_rule(self, user_id, access_rule_id):
+ """Deletes an access rule.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/identity/v3/index.html#delete-access-rule
+ """
+ resp, body = self.delete('users/%s/access_rules/%s' %
+ (user_id, access_rule_id))
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
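
A hedged sketch of exercising this client once it is registered as `access_rules_client` in tempest/clients.py (see above); the user id and the application-credential setup are assumed to exist already, and such tests would normally be guarded by the new [identity-feature-enabled]/access_rules option.

# Illustrative only; response keys follow the Keystone access rules API.
rules = self.os_primary.access_rules_client.list_access_rules(user_id)
rule_id = rules['access_rules'][0]['id']

self.os_primary.access_rules_client.show_access_rule(user_id, rule_id)

# An access rule can only be deleted once no application credential
# references it any more.
self.os_primary.access_rules_client.delete_access_rule(user_id, rule_id)
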
diff --git a/tempest/lib/services/identity/v3/groups_client.py b/tempest/lib/services/identity/v3/groups_client.py
index f823b21..2cfb24a 100644
--- a/tempest/lib/services/identity/v3/groups_client.py
+++ b/tempest/lib/services/identity/v3/groups_client.py
@@ -110,6 +110,6 @@
def check_group_user_existence(self, group_id, user_id):
"""Check user in group."""
- resp, body = self.head('groups/%s/users/%s' % (group_id, user_id))
+ resp, _ = self.head('groups/%s/users/%s' % (group_id, user_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/identity/v3/inherited_roles_client.py b/tempest/lib/services/identity/v3/inherited_roles_client.py
index 3949437..f937ed6 100644
--- a/tempest/lib/services/identity/v3/inherited_roles_client.py
+++ b/tempest/lib/services/identity/v3/inherited_roles_client.py
@@ -51,7 +51,7 @@
def check_user_inherited_project_role_on_domain(
self, domain_id, user_id, role_id):
"""Checks whether a user has an inherited project role on a domain."""
- resp, body = self.head(
+ resp, _ = self.head(
"OS-INHERIT/domains/%s/users/%s/roles/%s/inherited_to_projects"
% (domain_id, user_id, role_id))
self.expected_success(204, resp.status)
@@ -88,7 +88,7 @@
def check_group_inherited_project_role_on_domain(
self, domain_id, group_id, role_id):
"""Checks whether a group has an inherited project role on a domain."""
- resp, body = self.head(
+ resp, _ = self.head(
"OS-INHERIT/domains/%s/groups/%s/roles/%s/inherited_to_projects"
% (domain_id, group_id, role_id))
self.expected_success(204, resp.status)
@@ -115,7 +115,7 @@
def check_user_has_flag_on_inherited_to_project(
self, project_id, user_id, role_id):
"""Check if user has an inherited project role on project"""
- resp, body = self.head(
+ resp, _ = self.head(
"OS-INHERIT/projects/%s/users/%s/roles/%s/inherited_to_projects"
% (project_id, user_id, role_id))
self.expected_success(204, resp.status)
@@ -142,7 +142,7 @@
def check_group_has_flag_on_inherited_to_project(
self, project_id, group_id, role_id):
"""Check if group has an inherited project role on project"""
- resp, body = self.head(
+ resp, _ = self.head(
"OS-INHERIT/projects/%s/groups/%s/roles/%s/inherited_to_projects"
% (project_id, group_id, role_id))
self.expected_success(204, resp.status)
diff --git a/tempest/lib/services/identity/v3/oauth_token_client.py b/tempest/lib/services/identity/v3/oauth_token_client.py
index 6ca401b..722deca 100644
--- a/tempest/lib/services/identity/v3/oauth_token_client.py
+++ b/tempest/lib/services/identity/v3/oauth_token_client.py
@@ -71,7 +71,7 @@
normalized_params = '&'.join(parameter_parts)
# normalize_uri
- scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
+ scheme, netloc, path, params, _, _ = urlparse.urlparse(uri)
scheme = scheme.lower()
netloc = netloc.lower()
path = path.replace('//', '/')
diff --git a/tempest/lib/services/identity/v3/roles_client.py b/tempest/lib/services/identity/v3/roles_client.py
index f9356be..0d7593a 100644
--- a/tempest/lib/services/identity/v3/roles_client.py
+++ b/tempest/lib/services/identity/v3/roles_client.py
@@ -122,16 +122,16 @@
def check_user_role_existence_on_project(self, project_id,
user_id, role_id):
"""Check role of a user on a project."""
- resp, body = self.head('projects/%s/users/%s/roles/%s' %
- (project_id, user_id, role_id))
+ resp, _ = self.head('projects/%s/users/%s/roles/%s' %
+ (project_id, user_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def check_user_role_existence_on_domain(self, domain_id,
user_id, role_id):
"""Check role of a user on a domain."""
- resp, body = self.head('domains/%s/users/%s/roles/%s' %
- (domain_id, user_id, role_id))
+ resp, _ = self.head('domains/%s/users/%s/roles/%s' %
+ (domain_id, user_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
@@ -182,16 +182,16 @@
def check_role_from_group_on_project_existence(self, project_id,
group_id, role_id):
"""Check role of a group on a project."""
- resp, body = self.head('projects/%s/groups/%s/roles/%s' %
- (project_id, group_id, role_id))
+ resp, _ = self.head('projects/%s/groups/%s/roles/%s' %
+ (project_id, group_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def check_role_from_group_on_domain_existence(self, domain_id,
group_id, role_id):
"""Check role of a group on a domain."""
- resp, body = self.head('domains/%s/groups/%s/roles/%s' %
- (domain_id, group_id, role_id))
+ resp, _ = self.head('domains/%s/groups/%s/roles/%s' %
+ (domain_id, group_id, role_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
@@ -232,14 +232,14 @@
def check_role_inference_rule(self, prior_role, implies_role):
"""Check a role inference rule."""
- resp, body = self.head('roles/%s/implies/%s' %
- (prior_role, implies_role))
+ resp, _ = self.head('roles/%s/implies/%s' %
+ (prior_role, implies_role))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
def delete_role_inference_rule(self, prior_role, implies_role):
"""Delete a role inference rule."""
- resp, body = self.delete('roles/%s/implies/%s' %
- (prior_role, implies_role))
+ resp, _ = self.delete('roles/%s/implies/%s' %
+ (prior_role, implies_role))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
diff --git a/tempest/lib/services/identity/v3/users_client.py b/tempest/lib/services/identity/v3/users_client.py
index f47730f..bba02a4 100644
--- a/tempest/lib/services/identity/v3/users_client.py
+++ b/tempest/lib/services/identity/v3/users_client.py
@@ -118,3 +118,30 @@
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
+
+ def create_user_ec2_credential(self, user_id, **kwargs):
+ post_body = json.dumps(kwargs)
+ resp, body = self.post('/users/%s/credentials/OS-EC2' % user_id,
+ post_body)
+ self.expected_success(201, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def delete_user_ec2_credential(self, user_id, access):
+ resp, body = self.delete('/users/%s/credentials/OS-EC2/%s' %
+ (user_id, access))
+ self.expected_success(204, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_user_ec2_credentials(self, user_id):
+ resp, body = self.get('/users/%s/credentials/OS-EC2' % user_id)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_user_ec2_credential(self, user_id, access):
+ resp, body = self.get('/users/%s/credentials/OS-EC2/%s' %
+ (user_id, access))
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
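+
+ # Usage sketch (illustrative only; `user_id` and `tenant_id` are assumed
+ # to be supplied by the caller):
+ #
+ #     cred = self.create_user_ec2_credential(
+ #         user_id, tenant_id=tenant_id)['credential']
+ #     self.show_user_ec2_credential(user_id, cred['access'])
+ #     self.delete_user_ec2_credential(user_id, cred['access'])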
diff --git a/tempest/lib/services/placement/__init__.py b/tempest/lib/services/placement/__init__.py
index 5c20c57..daeaeab 100644
--- a/tempest/lib/services/placement/__init__.py
+++ b/tempest/lib/services/placement/__init__.py
@@ -14,5 +14,7 @@
from tempest.lib.services.placement.placement_client import \
PlacementClient
+from tempest.lib.services.placement.resource_providers_client import \
+ ResourceProvidersClient
-__all__ = ['PlacementClient']
+__all__ = ['PlacementClient', 'ResourceProvidersClient']
diff --git a/tempest/lib/services/placement/resource_providers_client.py b/tempest/lib/services/placement/resource_providers_client.py
new file mode 100644
index 0000000..56f6409
--- /dev/null
+++ b/tempest/lib/services/placement/resource_providers_client.py
@@ -0,0 +1,82 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils as json
+from six.moves.urllib import parse as urllib
+
+from tempest.lib.common import rest_client
+from tempest.lib.services.placement import base_placement_client
+
+
+class ResourceProvidersClient(base_placement_client.BasePlacementClient):
+ """Client class for resource provider related methods
+
+ This client class aims to support read-only API operations for resource
+ providers. The following resources are supported:
+ * resource providers
+ * resource provider inventories
+ * resource provider aggregates
+ """
+
+ def list_resource_providers(self, **params):
+ """List resource providers.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/placement/#list-resource-providers
+ """
+ url = '/resource_providers'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def show_resource_provider(self, rp_uuid):
+ """Show resource provider.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/placement/#show-resource-provider
+ """
+ url = '/resource_providers/%s' % rp_uuid
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_resource_provider_inventories(self, rp_uuid):
+ """List resource provider inventories.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/placement/#list-resource-provider-inventories
+ """
+ url = '/resource_providers/%s/inventories' % rp_uuid
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
+
+ def list_resource_provider_aggregates(self, rp_uuid):
+ """List resource provider aggregates.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/placement/#list-resource-provider-aggregates
+ """
+ url = '/resource_providers/%s/aggregates' % rp_uuid
+ resp, body = self.get(url)
+ self.expected_success(200, resp.status)
+ body = json.loads(body)
+ return rest_client.ResponseBody(resp, body)
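+
+# Usage sketch (illustrative only; `auth_provider` and the region name are
+# assumed to come from the caller's setup):
+#
+#     client = ResourceProvidersClient(auth_provider, 'placement', 'regionOne')
+#     rps = client.list_resource_providers()['resource_providers']
+#     if rps:
+#         inventories = client.list_resource_provider_inventories(
+#             rps[0]['uuid'])['inventories']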
diff --git a/tempest/lib/services/volume/v1/volumes_client.py b/tempest/lib/services/volume/v1/volumes_client.py
index 4ed5eb1..2efb0da 100644
--- a/tempest/lib/services/volume/v1/volumes_client.py
+++ b/tempest/lib/services/volume/v1/volumes_client.py
@@ -302,5 +302,5 @@
def retype_volume(self, volume_id, **kwargs):
"""Updates volume with new volume type."""
post_body = json.dumps({'os-retype': kwargs})
- resp, body = self.post('volumes/%s/action' % volume_id, post_body)
+ resp, _ = self.post('volumes/%s/action' % volume_id, post_body)
self.expected_success(202, resp.status)
diff --git a/tempest/lib/services/volume/v3/types_client.py b/tempest/lib/services/volume/v3/types_client.py
index 7fa24a4..1ebd447 100644
--- a/tempest/lib/services/volume/v3/types_client.py
+++ b/tempest/lib/services/volume/v3/types_client.py
@@ -65,6 +65,19 @@
self.validate_response(schema.show_volume_type, resp, body)
return rest_client.ResponseBody(resp, body)
+ def show_default_volume_type(self):
+ """Returns the details of a single volume type.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/block-storage/v3/index.html#show-default-volume-type
+ """
+ url = "types/default"
+ resp, body = self.get(url)
+ body = json.loads(body)
+ self.validate_response(schema.show_volume_type, resp, body)
+ return rest_client.ResponseBody(resp, body)
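+
+ # Example (illustrative): the default type details are returned under
+ # the 'volume_type' key, e.g.:
+ #     default = types_client.show_default_volume_type()['volume_type']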
+
def create_volume_type(self, **kwargs):
"""Create volume type.
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index ff860d5..3c37b70 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -160,7 +160,7 @@
client.delete_port, port['id'])
return port
- def create_keypair(self, client=None):
+ def create_keypair(self, client=None, **kwargs):
"""Creates keypair
Keypair is a public key of OpenSSH key pair used for accessing
@@ -170,10 +170,11 @@
"""
if not client:
client = self.keypairs_client
- name = data_utils.rand_name(self.__class__.__name__)
+ if not kwargs.get('name'):
+ kwargs['name'] = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
- body = client.create_keypair(name=name)
- self.addCleanup(client.delete_keypair, name)
+ body = client.create_keypair(**kwargs)
+ self.addCleanup(client.delete_keypair, kwargs['name'])
return body['keypair']
def create_server(self, name=None, image_id=None, flavor=None,
@@ -306,7 +307,7 @@
return server
def create_volume(self, size=None, name=None, snapshot_id=None,
- imageRef=None, volume_type=None):
+ imageRef=None, volume_type=None, **kwargs):
"""Creates volume
This wrapper utility creates volume and waits for volume to be
@@ -326,11 +327,11 @@
size = max(size, min_disk)
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-volume")
- kwargs = {'display_name': name,
- 'snapshot_id': snapshot_id,
- 'imageRef': imageRef,
- 'volume_type': volume_type,
- 'size': size}
+ kwargs.update({'name': name,
+ 'snapshot_id': snapshot_id,
+ 'imageRef': imageRef,
+ 'volume_type': volume_type,
+ 'size': size})
if CONF.compute.compute_volume_common_az:
kwargs.setdefault('availability_zone',
@@ -422,7 +423,7 @@
snapshot = self.snapshots_client.create_snapshot(
volume_id=volume_id,
force=force,
- display_name=name,
+ name=name,
description=description,
metadata=metadata)['snapshot']
@@ -625,7 +626,7 @@
LOG.debug("image:%s", image['id'])
return image['id']
- def _log_console_output(self, servers=None, client=None):
+ def _log_console_output(self, servers=None, client=None, **kwargs):
"""Console log output"""
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
@@ -637,7 +638,7 @@
for server in servers:
try:
console_output = client.get_console_output(
- server['id'])['output']
+ server['id'], **kwargs)['output']
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
except lib_exc.NotFound:
@@ -697,17 +698,20 @@
image_name, server['name'])
return snapshot_image
- def nova_volume_attach(self, server, volume_to_attach):
+ def nova_volume_attach(self, server, volume_to_attach, **kwargs):
"""Compute volume attach
This utility attaches volume from compute and waits for the
volume status to be 'in-use' state.
"""
volume = self.servers_client.attach_volume(
- server['id'], volumeId=volume_to_attach['id'])['volumeAttachment']
+ server['id'], volumeId=volume_to_attach['id'],
+ **kwargs)['volumeAttachment']
self.assertEqual(volume_to_attach['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.nova_volume_detach, server, volume)
# Return the updated volume after the attachment
return self.volumes_client.show_volume(volume['id'])['volume']
@@ -810,13 +814,15 @@
LOG.exception(extra_msg)
raise
- def create_floating_ip(self, server, pool_name=None):
+ def create_floating_ip(self, server, pool_name=None, **kwargs):
"""Create a floating IP and associates to a server on Nova"""
if not pool_name:
pool_name = CONF.network.floating_network_name
+
floating_ip = (self.compute_floating_ips_client.
- create_floating_ip(pool=pool_name)['floating_ip'])
+ create_floating_ip(pool=pool_name,
+ **kwargs)['floating_ip'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.compute_floating_ips_client.delete_floating_ip,
floating_ip['id'])
@@ -825,18 +831,20 @@
return floating_ip
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
- private_key=None, server=None):
+ private_key=None, server=None, username=None,
+ fs='ext4'):
"""Creates timestamp
This wrapper utility does ssh, creates timestamp and returns the
created timestamp.
"""
-
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
- server=server)
+ server=server,
+ username=username)
+
if dev_name is not None:
- ssh_client.make_fs(dev_name)
+ ssh_client.make_fs(dev_name, fs=fs)
ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
mount_path))
cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
@@ -865,18 +873,22 @@
ssh_client.exec_command('sudo umount %s' % mount_path)
return timestamp
- def get_server_ip(self, server):
+ def get_server_ip(self, server, **kwargs):
"""Get the server fixed or floating IP.
Based on the configuration we're in, return a correct ip
address for validating that a guest is up.
+
+ If CONF.validation.connect_method is 'floating', a floating IP
+ is created for the server and kwargs are passed through to the
+ floating IP creation call.
"""
if CONF.validation.connect_method == 'floating':
# The tests calling this method don't have a floating IP
# and can't make use of the validation resources. So the
# method is creating the floating IP there.
- return self.create_floating_ip(server)['ip']
+ return self.create_floating_ip(server, **kwargs)['ip']
elif CONF.validation.connect_method == 'fixed':
# Determine the network name to look for based on config or creds
# provider network resources.
@@ -916,14 +928,14 @@
keypair=None,
security_group=None,
delete_on_termination=False,
- name=None):
+ name=None, **kwargs):
"""Boot instance from resource
This wrapper utility boots instance from resource with block device
mapping with source info passed in arguments
"""
- create_kwargs = dict()
+ create_kwargs = {'image_id': ''}
if keypair:
create_kwargs['key_name'] = keypair['name']
if security_group:
@@ -935,8 +947,9 @@
delete_on_termination=delete_on_termination))
if name:
create_kwargs['name'] = name
+ create_kwargs.update(kwargs)
- return self.create_server(image_id='', **create_kwargs)
+ return self.create_server(**create_kwargs)
def create_volume_from_image(self):
"""Create volume from image"""
@@ -1063,14 +1076,13 @@
return subnet
- def _get_server_port_id_and_ip4(self, server, ip_addr=None):
- if ip_addr:
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'],
- fixed_ips='ip_address=%s' % ip_addr)['ports']
- else:
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'])['ports']
+ def _get_server_port_id_and_ip4(self, server, ip_addr=None, **kwargs):
+
+ if ip_addr and not kwargs.get('fixed_ips'):
+ kwargs['fixed_ips'] = 'ip_address=%s' % ip_addr
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'], **kwargs)['ports']
+
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
# with 2 subnets
@@ -1109,7 +1121,7 @@
return net[0]
def create_floating_ip(self, server, external_network_id=None,
- port_id=None, client=None):
+ port_id=None, client=None, **kwargs):
"""Create a floating IP and associates to a resource/port on Neutron"""
if not external_network_id:
@@ -1121,15 +1133,17 @@
else:
ip4 = None
- kwargs = {
+ floatingip_kwargs = {
'floating_network_id': external_network_id,
'port_id': port_id,
'tenant_id': server.get('project_id') or server['tenant_id'],
'fixed_ip_address': ip4,
}
if CONF.network.subnet_id:
- kwargs['subnet_id'] = CONF.network.subnet_id
- result = client.create_floatingip(**kwargs)
+ floatingip_kwargs['subnet_id'] = CONF.network.subnet_id
+
+ floatingip_kwargs.update(kwargs)
+ result = client.create_floatingip(**floatingip_kwargs)
floating_ip = result['floatingip']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index b515639..58e234f 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -51,10 +51,27 @@
return aggregate
def _get_host_name(self):
+ # Find a host that has not been added to another availability zone;
+ # a host cannot belong to more than one availability zone.
svc_list = self.services_client.list_services(
binary='nova-compute')['services']
self.assertNotEmpty(svc_list)
- return svc_list[0]['host']
+ hosts_available = []
+ for host in svc_list:
+ if (host['state'] == 'up' and host['status'] == 'enabled'):
+ hosts_available.append(host['host'])
+ aggregates = self.aggregates_client.list_aggregates()['aggregates']
+ hosts_in_zone = []
+ for agg in aggregates:
+ if agg['availability_zone']:
+ hosts_in_zone.extend(agg['hosts'])
+ hosts = [v for v in hosts_available if v not in hosts_in_zone]
+ if not hosts:
+ raise self.skipException("All hosts are already in other "
+ "availability zones, so none can be "
+ "added to this aggregate.\nAggregates "
+ "list: %s" % aggregates)
+ return hosts[0]
def _add_host(self, aggregate_id, host):
aggregate = (self.aggregates_client.add_host(aggregate_id, host=host)
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
index 5eab1da..74d4ed9 100644
--- a/tempest/scenario/test_minbw_allocation_placement.py
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -178,7 +178,13 @@
for rp, resources in allocations['allocations'].items():
if self.INGRESS_RESOURCE_CLASS in resources['resources']:
bw_resource_in_alloc = True
+ allocation_rp = rp
self.assertTrue(bw_resource_in_alloc)
+ # Check that the port's binding:profile is not empty and matches the
+ # allocated resource provider uuid
+ port = self.os_admin.ports_client.show_port(valid_port['id'])
+ self.assertEqual(allocation_rp,
+ port['port']['binding:profile']['allocation'])
# boot another vm with max int bandwidth
not_valid_port = self.create_port(
@@ -196,3 +202,6 @@
server2 = self.servers_client.show_server(server2['id'])
self.assertIn('fault', server2['server'])
self.assertIn('No valid host', server2['server']['fault']['message'])
+ # Check that the port's binding:profile is empty
+ port = self.os_admin.ports_client.show_port(not_valid_port['id'])
+ self.assertEqual(0, len(port['port']['binding:profile']))
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index e26dc9d..dbab212 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -80,8 +80,8 @@
return floating_ip
def _check_network_connectivity(self, server, keypair, floating_ip,
- should_connect=True):
- username = CONF.validation.image_ssh_user
+ should_connect=True,
+ username=CONF.validation.image_ssh_user):
private_key = keypair['private_key']
self.check_tenant_network_connectivity(
server, username, private_key,
@@ -95,12 +95,13 @@
'Public network connectivity check failed',
server)
- def _wait_server_status_and_check_network_connectivity(self, server,
- keypair,
- floating_ip):
+ def _wait_server_status_and_check_network_connectivity(
+ self, server, keypair, floating_ip,
+ username=CONF.validation.image_ssh_user):
waiters.wait_for_server_status(self.servers_client, server['id'],
'ACTIVE')
- self._check_network_connectivity(server, keypair, floating_ip)
+ self._check_network_connectivity(server, keypair, floating_ip,
+ username=username)
@decorators.idempotent_id('61f1aa9a-1573-410e-9054-afa557cab021')
@decorators.attr(type='slow')
@@ -137,10 +138,11 @@
server = self._setup_server(keypair)
floating_ip = self._setup_network(server, keypair)
image_ref_alt = CONF.compute.image_ref_alt
+ username_alt = CONF.validation.image_alt_ssh_user
self.servers_client.rebuild_server(server['id'],
image_ref=image_ref_alt)
self._wait_server_status_and_check_network_connectivity(
- server, keypair, floating_ip)
+ server, keypair, floating_ip, username_alt)
@decorators.idempotent_id('2b2642db-6568-4b35-b812-eceed3fa20ce')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
diff --git a/tempest/tests/cmd/test_run.py b/tempest/tests/cmd/test_run.py
index 5d9ddfa..3c99bbe 100644
--- a/tempest/tests/cmd/test_run.py
+++ b/tempest/tests/cmd/test_run.py
@@ -29,10 +29,6 @@
from tempest.lib.common.utils import data_utils
from tempest.tests import base
-if six.PY2:
- # Python 2 has not FileNotFoundError exception
- FileNotFoundError = IOError
-
DEVNULL = open(os.devnull, 'wb')
atexit.register(DEVNULL.close)
@@ -149,8 +145,7 @@
]
# NOTE(mtreinish): on python 3 the subprocess prints b'' around
# stdout.
- if six.PY3:
- result = ["b\'" + x + "\'" for x in result]
+ result = ["b\'" + x + "\'" for x in result]
self.assertEqual(result, tests)
def test_tempest_run_with_worker_file(self):
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 73924bd..f45eec0 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -55,6 +55,56 @@
waiters.wait_for_image_status,
self.client, 'fake_image_id', 'active')
+ def test_wait_for_image_imported_to_stores(self):
+ self.client.show_image.return_value = ({'status': 'active',
+ 'stores': 'fake_store'})
+ start_time = int(time.time())
+ waiters.wait_for_image_imported_to_stores(
+ self.client, 'fake_image_id', 'fake_store')
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout
+ self.assertLess((end_time - start_time), 10)
+
+ def test_wait_for_image_imported_to_stores_timeout(self):
+ time_mock = self.patch('time.time')
+ client = mock.MagicMock()
+ client.build_timeout = 2
+ self.patch('time.time', side_effect=[0., 1., 2.])
+ time_mock.side_effect = utils.generate_timeout_series(1)
+
+ client.show_image.return_value = ({
+ 'status': 'saving',
+ 'stores': 'fake_store',
+ 'os_glance_failed_import': 'fake_os_glance_failed_import'})
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_image_imported_to_stores,
+ client, 'fake_image_id', 'fake_store')
+
+ def test_wait_for_image_copied_to_stores(self):
+ self.client.show_image.return_value = ({
+ 'status': 'active',
+ 'os_glance_importing_to_stores': '',
+ 'os_glance_failed_import': 'fake_os_glance_failed_import'})
+ start_time = int(time.time())
+ waiters.wait_for_image_copied_to_stores(
+ self.client, 'fake_image_id')
+ end_time = int(time.time())
+ # Ensure waiter returns before build_timeout
+ self.assertLess((end_time - start_time), 10)
+
+ def test_wait_for_image_copied_to_stores_timeout(self):
+ time_mock = self.patch('time.time')
+ self.patch('time.time', side_effect=[0., 1.])
+ time_mock.side_effect = utils.generate_timeout_series(1)
+
+ self.client.show_image.return_value = ({
+ 'status': 'active',
+ 'os_glance_importing_to_stores': 'processing',
+ 'os_glance_failed_import': 'fake_os_glance_failed_import'})
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_image_copied_to_stores,
+ self.client, 'fake_image_id')
+
class TestInterfaceWaiters(base.TestCase):
@@ -131,6 +181,36 @@
mock.call('server_id')])
sleep.assert_called_once_with(client.build_interval)
+ def test_wait_for_guest_os_boot(self):
+ get_console_output = mock.Mock(
+ side_effect=[
+ {'output': 'os not ready yet\n'},
+ {'output': 'login:\n'}
+ ])
+ client = self.mock_client(get_console_output=get_console_output)
+ self.patch('time.time', return_value=0.)
+ sleep = self.patch('time.sleep')
+
+ with mock.patch.object(waiters.LOG, "info") as log_info:
+ waiters.wait_for_guest_os_boot(client, 'server_id')
+
+ get_console_output.assert_has_calls([
+ mock.call('server_id'), mock.call('server_id')])
+ sleep.assert_called_once_with(client.build_interval)
+ log_info.assert_not_called()
+
+ def test_wait_for_guest_os_boot_timeout(self):
+ get_console_output = mock.Mock(
+ return_value={'output': 'os not ready yet\n'})
+ client = self.mock_client(get_console_output=get_console_output)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ self.patch('time.sleep')
+
+ with mock.patch.object(waiters.LOG, "info") as log_info:
+ waiters.wait_for_guest_os_boot(client, 'server_id')
+
+ log_info.assert_called_once()
+
class TestVolumeWaiters(base.TestCase):
vol_migrating_src_host = {
diff --git a/tempest/tests/lib/cmd/test_check_uuid.py b/tempest/tests/lib/cmd/test_check_uuid.py
index 28ebca1..428e047 100644
--- a/tempest/tests/lib/cmd/test_check_uuid.py
+++ b/tempest/tests/lib/cmd/test_check_uuid.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import ast
import importlib
import os
import sys
@@ -95,6 +96,8 @@
class TestTestChecker(base.TestCase):
+ IMPORT_LINE = "from tempest.lib import decorators\n"
+
def _test_add_uuid_to_test(self, source_file):
class Fake_test_node():
lineno = 1
@@ -127,55 +130,69 @@
" pass")
self._test_add_uuid_to_test(source_file)
+ @staticmethod
+ def get_mocked_ast_object(lineno, col_offset, module, name, object_type):
+ ast_object = mock.Mock(spec=object_type)
+ name_obj = mock.Mock()
+ ast_object.lineno = lineno
+ ast_object.col_offset = col_offset
+ name_obj.name = name
+ ast_object.module = module
+ ast_object.names = [name_obj]
+
+ return ast_object
+
def test_add_import_for_test_uuid_no_tempest(self):
patcher = check_uuid.SourcePatcher()
checker = check_uuid.TestChecker(importlib.import_module('tempest'))
- fake_file = tempfile.NamedTemporaryFile("w+t")
+ fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
+ source_code = "from unittest import mock\n"
+ fake_file.write(source_code)
+ fake_file.close()
class Fake_src_parsed():
- body = ['test_node']
- checker._import_name = mock.Mock(return_value='fake_module')
+ body = [TestTestChecker.get_mocked_ast_object(
+ 1, 4, 'unittest', 'mock', ast.ImportFrom)]
- checker._add_import_for_test_uuid(patcher, Fake_src_parsed(),
+ checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
fake_file.name)
- (patch_id, patch), = patcher.patches.items()
- self.assertEqual(patcher._quote('\n' + check_uuid.IMPORT_LINE + '\n'),
- patch)
- self.assertEqual('{%s:s}' % patch_id,
- patcher.source_files[fake_file.name])
+ patcher.apply_patches()
+
+ with open(fake_file.name, "r") as f:
+ expected_result = source_code + '\n' + TestTestChecker.IMPORT_LINE
+ self.assertEqual(expected_result, f.read())
def test_add_import_for_test_uuid_tempest(self):
patcher = check_uuid.SourcePatcher()
checker = check_uuid.TestChecker(importlib.import_module('tempest'))
fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
- test1 = (" def test_test():\n"
- " pass\n")
- test2 = (" def test_another_test():\n"
- " pass\n")
- source_code = test1 + test2
+ source_code = "from tempest import a_fake_module\n"
fake_file.write(source_code)
fake_file.close()
- def fake_import_name(node):
- return node.name
- checker._import_name = fake_import_name
+ class Fake_src_parsed:
+ body = [TestTestChecker.get_mocked_ast_object(
+ 1, 4, 'tempest', 'a_fake_module', ast.ImportFrom)]
- class Fake_node():
- def __init__(self, lineno, col_offset, name):
- self.lineno = lineno
- self.col_offset = col_offset
- self.name = name
-
- class Fake_src_parsed():
- body = [Fake_node(1, 4, 'tempest.a_fake_module'),
- Fake_node(3, 4, 'another_fake_module')]
-
- checker._add_import_for_test_uuid(patcher, Fake_src_parsed(),
+ checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
fake_file.name)
- (patch_id, patch), = patcher.patches.items()
- self.assertEqual(patcher._quote(check_uuid.IMPORT_LINE + '\n'),
- patch)
- expected_source = patcher._quote(test1) + '{' + patch_id + ':s}' +\
- patcher._quote(test2)
- self.assertEqual(expected_source,
- patcher.source_files[fake_file.name])
+ patcher.apply_patches()
+
+ with open(fake_file.name, "r") as f:
+ expected_result = source_code + TestTestChecker.IMPORT_LINE
+ self.assertEqual(expected_result, f.read())
+
+ def test_add_import_no_import(self):
+ patcher = check_uuid.SourcePatcher()
+ patcher.add_patch = mock.Mock()
+ checker = check_uuid.TestChecker(importlib.import_module('tempest'))
+ fake_file = tempfile.NamedTemporaryFile("w+t", delete=False)
+ fake_file.close()
+
+ class Fake_src_parsed:
+ body = []
+
+ checker._add_import_for_test_uuid(patcher, Fake_src_parsed,
+ fake_file.name)
+
+ self.assertFalse(patcher.add_patch.called)
diff --git a/tempest/tests/lib/common/utils/test_test_utils.py b/tempest/tests/lib/common/utils/test_test_utils.py
index bdc0ea4..d8e3745 100644
--- a/tempest/tests/lib/common/utils/test_test_utils.py
+++ b/tempest/tests/lib/common/utils/test_test_utils.py
@@ -74,6 +74,17 @@
self.assertRaises(ValueError, test_utils.call_and_ignore_notfound_exc,
raise_value_error)
+ def test_call_and_ignore_notfound_exc_when_serverfault_raised(self):
+ calls = []
+
+ def raise_serverfault():
+ calls.append('call')
+ raise exceptions.ServerFault()
+ self.assertRaises(exceptions.ServerFault,
+ test_utils.call_and_ignore_notfound_exc,
+ raise_serverfault)
+ self.assertEqual(3, len(calls))
+
def test_call_and_ignore_notfound_exc(self):
m = mock.Mock(return_value=42)
args, kwargs = (1,), {'1': None}
diff --git a/tempest/tests/lib/services/identity/v3/test_access_rules_client.py b/tempest/tests/lib/services/identity/v3/test_access_rules_client.py
new file mode 100644
index 0000000..71c9cde
--- /dev/null
+++ b/tempest/tests/lib/services/identity/v3/test_access_rules_client.py
@@ -0,0 +1,97 @@
+# Copyright 2019 SUSE LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.lib.services.identity.v3 import access_rules_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestAccessRulesClient(base.BaseServiceTest):
+ FAKE_LIST_ACCESS_RULES = {
+ "links": {
+ "self": "https://example.com/identity/v3/users/" +
+ "3e0716ae/access_rules",
+ "previous": None,
+ "next": None
+ },
+ "access_rules": [
+ {
+ "path": "/v2.0/metrics",
+ "links": {
+ "self": "https://example.com/identity/v3/access_rules/" +
+ "07d719df00f349ef8de77d542edf010c"
+ },
+ "id": "07d719df00f349ef8de77d542edf010c",
+ "service": "monitoring",
+ "method": "GET"
+ }
+ ]
+ }
+
+ FAKE_ACCESS_RULE_INFO = {
+ "access_rule": {
+ "path": "/v2.0/metrics",
+ "links": {
+ "self": "https://example.com/identity/v3/access_rules/" +
+ "07d719df00f349ef8de77d542edf010c"
+ },
+ "id": "07d719df00f349ef8de77d542edf010c",
+ "service": "monitoring",
+ "method": "GET"
+ }
+ }
+
+ def setUp(self):
+ super(TestAccessRulesClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = access_rules_client.AccessRulesClient(
+ fake_auth, 'identity', 'regionOne')
+
+ def _test_show_access_rule(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_access_rule,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_ACCESS_RULE_INFO,
+ bytes_body,
+ user_id="123456",
+ access_rule_id="5499a186")
+
+ def _test_list_access_rules(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_access_rules,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_ACCESS_RULES,
+ bytes_body,
+ user_id="123456")
+
+ def test_show_access_rule_with_str_body(self):
+ self._test_show_access_rule()
+
+ def test_show_access_rule_with_bytes_body(self):
+ self._test_show_access_rule(bytes_body=True)
+
+ def test_list_access_rule_with_str_body(self):
+ self._test_list_access_rules()
+
+ def test_list_access_rule_with_bytes_body(self):
+ self._test_list_access_rules(bytes_body=True)
+
+ def test_delete_access_rule(self):
+ self.check_service_client_function(
+ self.client.delete_access_rule,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ user_id="123456",
+ access_rule_id="5499a186",
+ status=204)
diff --git a/tempest/tests/lib/services/identity/v3/test_endpoints_client.py b/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
index ca15dd1..0efc462 100644
--- a/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_endpoints_client.py
@@ -54,12 +54,44 @@
}
FAKE_SERVICE_ID = "a4dc5060-f757-4662-b658-edd2aefbb41d"
+ FAKE_ENDPOINT_ID = "b335d394-cdb9-4519-b95d-160b7706e54ew"
+
+ FAKE_UPDATE_ENDPOINT = {
+ "endpoint": {
+ "id": "828384",
+ "interface": "internal",
+ "links": {
+ "self": "http://example.com/identity/v3/"
+ "endpoints/828384"
+ },
+ "region_id": "north",
+ "service_id": "686766",
+ "url": "http://example.com/identity/v3/"
+ "endpoints/828384"
+ }
+ }
+
+ FAKE_SHOW_ENDPOINT = {
+ "endpoint": {
+ "enabled": True,
+ "id": "01c3d5b92f7841ac83fb4b26173c12c7",
+ "interface": "admin",
+ "links": {
+ "self": "http://example.com/identity/v3/"
+ "endpoints/828384"
+ },
+ "region": "RegionOne",
+ "region_id": "RegionOne",
+ "service_id": "3b2d6ad7e02c4cde8498a547601f1b8f",
+ "url": "http://23.253.211.234:9696/"
+ }
+ }
def setUp(self):
super(TestEndpointsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
- self.client = endpoints_client.EndPointsClient(fake_auth,
- 'identity', 'regionOne')
+ self.client = endpoints_client.EndPointsClient(
+ fake_auth, 'identity', 'regionOne')
def _test_create_endpoint(self, bytes_body=False):
self.check_service_client_function(
@@ -84,6 +116,38 @@
mock_args=[mock_args],
**params)
+ def _test_update_endpoint(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.update_endpoint,
+ 'tempest.lib.common.rest_client.RestClient.patch',
+ self.FAKE_UPDATE_ENDPOINT,
+ bytes_body,
+ endpoint_id=self.FAKE_ENDPOINT_ID,
+ interface="public",
+ region_id="north",
+ url="http://example.com/identity/v3/endpoints/828384",
+ service_id=self.FAKE_SERVICE_ID)
+
+ def _test_show_endpoint(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_endpoint,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_SHOW_ENDPOINT,
+ bytes_body,
+ endpoint_id="3456")
+
+ def test_update_endpoint_with_str_body(self):
+ self._test_update_endpoint()
+
+ def test_update_endpoint_with_bytes_body(self):
+ self._test_update_endpoint(bytes_body=True)
+
+ def test_show_endpoint_with_str_body(self):
+ self._test_show_endpoint()
+
+ def test_show_endpoint_with_bytes_body(self):
+ self._test_show_endpoint(bytes_body=True)
+
def test_create_endpoint_with_str_body(self):
self._test_create_endpoint()
diff --git a/tempest/tests/lib/services/identity/v3/test_users_client.py b/tempest/tests/lib/services/identity/v3/test_users_client.py
index c0dfdae..7be0480 100644
--- a/tempest/tests/lib/services/identity/v3/test_users_client.py
+++ b/tempest/tests/lib/services/identity/v3/test_users_client.py
@@ -141,6 +141,35 @@
]
}
+ FAKE_USER_EC2_CREDENTIAL_INFO = {
+ "credential": {
+ 'user_id': '9beb0e12f3e5416db8d7cccfc785db3b',
+ 'access': '79abf59acc77492a86170cbe2f1feafa',
+ 'secret': 'c4e7d3a691fd4563873d381a40320f46',
+ 'trust_id': None,
+ 'tenant_id': '596557269d7b4dd78631a602eb9f151d'
+ }
+ }
+
+ FAKE_LIST_USER_EC2_CREDENTIALS = {
+ "credentials": [
+ {
+ 'user_id': '9beb0e12f3e5416db8d7cccfc785db3b',
+ 'access': '79abf59acc77492a86170cbe2f1feafa',
+ 'secret': 'c4e7d3a691fd4563873d381a40320f46',
+ 'trust_id': None,
+ 'tenant_id': '596557269d7b4dd78631a602eb9f151d'
+ },
+ {
+ 'user_id': '3beb0e12f3e5416db8d7cccfc785de4r',
+ 'access': '45abf59acc77492a86170cbe2f1fesde',
+ 'secret': 'g4e7d3a691fd4563873d381a40320e45',
+ 'trust_id': None,
+ 'tenant_id': '123557269d7b4dd78631a602eb9f112f'
+ }
+ ]
+ }
+
def setUp(self):
super(TestUsersClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
@@ -201,6 +230,33 @@
user_id='817fb3c23fd7465ba6d7fe1b1320121d',
)
+ def _test_create_user_ec2_credential(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.create_user_ec2_credential,
+ 'tempest.lib.common.rest_client.RestClient.post',
+ self.FAKE_USER_EC2_CREDENTIAL_INFO,
+ bytes_body,
+ status=201,
+ user_id="1",
+ tenant_id="123")
+
+ def _test_show_user_ec2_credential(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_user_ec2_credential,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_USER_EC2_CREDENTIAL_INFO,
+ bytes_body,
+ user_id="1",
+ access="123")
+
+ def _test_list_user_ec2_credentials(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_user_ec2_credentials,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_LIST_USER_EC2_CREDENTIALS,
+ bytes_body,
+ user_id="1")
+
def test_create_user_with_string_body(self):
self._test_create_user()
@@ -255,3 +311,30 @@
user_id='817fb3c23fd7465ba6d7fe1b1320121d',
password='NewTempestPassword',
original_password='OldTempestPassword')
+
+ def test_create_user_ec2_credential_with_str_body(self):
+ self._test_create_user_ec2_credential()
+
+ def test_create_user_ec2_credential_with_bytes_body(self):
+ self._test_create_user_ec2_credential(bytes_body=True)
+
+ def test_show_user_ec2_credential_with_str_body(self):
+ self._test_show_user_ec2_credential()
+
+ def test_show_user_ec2_credential_with_bytes_body(self):
+ self._test_show_user_ec2_credential(bytes_body=True)
+
+ def test_list_user_ec2_credentials_with_str_body(self):
+ self._test_list_user_ec2_credentials()
+
+ def test_list_user_ec2_credentials_with_bytes_body(self):
+ self._test_list_user_ec2_credentials(bytes_body=True)
+
+ def test_delete_user_ec2_credential(self):
+ self.check_service_client_function(
+ self.client.delete_user_ec2_credential,
+ 'tempest.lib.common.rest_client.RestClient.delete',
+ {},
+ user_id="123",
+ access="1234",
+ status=204)
diff --git a/tempest/tests/lib/services/network/test_floating_ips_client.py b/tempest/tests/lib/services/network/test_floating_ips_client.py
index c5b1845..e8f2e5a 100644
--- a/tempest/tests/lib/services/network/test_floating_ips_client.py
+++ b/tempest/tests/lib/services/network/test_floating_ips_client.py
@@ -27,6 +27,8 @@
{
"router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
"description": "for test",
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myfip",
"created_at": "2016-12-21T10:55:50Z",
"updated_at": "2016-12-21T10:55:53Z",
"revision_number": 1,
@@ -37,11 +39,24 @@
"floating_ip_address": "172.24.4.228",
"port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
"id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
- "status": "ACTIVE"
+ "status": "ACTIVE",
+ "port_details": {
+ "status": "ACTIVE",
+ "name": "",
+ "admin_state_up": True,
+ "network_id": "02dd8479-ef26-4398-a102-d19d0a7b3a1f",
+ "device_owner": "compute:nova",
+ "mac_address": "fa:16:3e:b1:3b:30",
+ "device_id": "8e3941b4-a6e9-499f-a1ac-2a4662025cba"
+ },
+ "tags": ["tag1,tag2"],
+ "port_forwardings": []
},
{
"router_id": None,
"description": "for test",
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myfip2",
"created_at": "2016-12-21T11:55:50Z",
"updated_at": "2016-12-21T11:55:53Z",
"revision_number": 2,
@@ -52,7 +67,10 @@
"floating_ip_address": "172.24.4.227",
"port_id": None,
"id": "61cea855-49cb-4846-997d-801b70c71bdd",
- "status": "DOWN"
+ "status": "DOWN",
+ "port_details": None,
+ "tags": ["tag1,tag2"],
+ "port_forwardings": []
}
]
}
diff --git a/tempest/tests/lib/services/network/test_networks_client.py b/tempest/tests/lib/services/network/test_networks_client.py
index 078f4b0..17233bc 100644
--- a/tempest/tests/lib/services/network/test_networks_client.py
+++ b/tempest/tests/lib/services/network/test_networks_client.py
@@ -31,12 +31,17 @@
"nova"
],
"created_at": "2016-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
"id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+ "ipv4_address_scope": None,
+ "ipv6_address_scope": None,
+ "l2_adjacency": False,
"mtu": 0,
"name": "net1",
"port_security_enabled": True,
"project_id": "4fd44f30292945e481c7b8a0c8908869",
"qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "revision_number": 1,
"router:external": False,
"shared": False,
"status": "ACTIVE",
@@ -46,7 +51,8 @@
"tenant_id": "4fd44f30292945e481c7b8a0c8908869",
"updated_at": "2016-03-08T20:19:41",
"vlan_transparent": True,
- "description": ""
+ "description": "",
+ "is_default": False
},
{
"admin_state_up": True,
@@ -54,12 +60,18 @@
"availability_zones": [
"nova"
],
+ "created_at": "2016-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
"id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+ "ipv4_address_scope": None,
+ "ipv6_address_scope": None,
+ "l2_adjacency": False,
"mtu": 0,
"name": "net2",
"port_security_enabled": True,
"project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
"qos_policy_id": "bfdb6c39f71e4d44b1dfbda245c50819",
+ "revision_number": 3,
"router:external": False,
"shared": False,
"status": "ACTIVE",
@@ -69,7 +81,8 @@
"tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
"updated_at": "2016-03-08T20:19:41",
"vlan_transparent": False,
- "description": ""
+ "description": "",
+ "is_default": False
}
]
}
@@ -108,6 +121,7 @@
"alive": True,
"topic": "dhcp_agent",
"host": "osboxes",
+ "ha_state": None,
"agent_type": "DHCP agent",
"resource_versions": {},
"created_at": "2017-06-19 21:39:51",
diff --git a/tempest/tests/lib/services/placement/test_resource_providers_client.py b/tempest/tests/lib/services/placement/test_resource_providers_client.py
new file mode 100644
index 0000000..11aeaf2
--- /dev/null
+++ b/tempest/tests/lib/services/placement/test_resource_providers_client.py
@@ -0,0 +1,119 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.placement import resource_providers_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestResourceProvidersClient(base.BaseServiceTest):
+ FAKE_RESOURCE_PROVIDER_UUID = '3722a86e-a563-11e9-9abb-c3d41b6d3abf'
+ FAKE_ROOT_PROVIDER_UUID = '4a6a57c8-a563-11e9-914e-f3e0478fce53'
+ FAKE_RESOURCE_PROVIDER = {
+ 'generation': 0,
+ 'name': 'Ceph Storage Pool',
+ 'uuid': FAKE_RESOURCE_PROVIDER_UUID,
+ 'parent_provider_uuid': FAKE_ROOT_PROVIDER_UUID,
+ 'root_provider_uuid': FAKE_ROOT_PROVIDER_UUID
+ }
+
+ FAKE_RESOURCE_PROVIDERS = {
+ 'resource_providers': [FAKE_RESOURCE_PROVIDER]
+ }
+
+ FAKE_RESOURCE_PROVIDER_INVENTORIES = {
+ 'inventories': {
+ 'DISK_GB': {
+ 'allocation_ratio': 1.0,
+ 'max_unit': 35,
+ 'min_unit': 1,
+ 'reserved': 0,
+ 'step_size': 1,
+ 'total': 35
+ }
+ },
+ 'resource_provider_generation': 7
+ }
+
+ FAKE_AGGREGATE_UUID = '1166be40-a567-11e9-9f2a-53827f9311fa'
+ FAKE_RESOURCE_PROVIDER_AGGREGATES = {
+ 'aggregates': [FAKE_AGGREGATE_UUID]
+ }
+
+ def setUp(self):
+ super(TestResourceProvidersClient, self).setUp()
+ fake_auth = fake_auth_provider.FakeAuthProvider()
+ self.client = resource_providers_client.ResourceProvidersClient(
+ fake_auth, 'placement', 'regionOne')
+
+ def _test_list_resource_providers(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_resource_providers,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_RESOURCE_PROVIDERS,
+ to_utf=bytes_body,
+ status=200
+ )
+
+ def test_list_resource_providers_with_str_body(self):
+ self._test_list_resource_providers()
+
+ def test_list_resource_providers_with_bytes_body(self):
+ self._test_list_resource_providers(bytes_body=True)
+
+ def _test_show_resource_provider(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_resource_provider,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_RESOURCE_PROVIDER,
+ to_utf=bytes_body,
+ status=200,
+ rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+ )
+
+ def test_show_resource_provider_with_str_body(self):
+ self._test_show_resource_provider()
+
+ def test_show_resource_provider_with_bytes_body(self):
+ self._test_show_resource_provider(bytes_body=True)
+
+ def _test_list_resource_provider_inventories(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_resource_provider_inventories,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_RESOURCE_PROVIDER_INVENTORIES,
+ to_utf=bytes_body,
+ status=200,
+ rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+ )
+
+ def test_list_resource_provider_inventories_with_str_body(self):
+ self._test_list_resource_provider_inventories()
+
+ def test_list_resource_provider_inventories_with_bytes_body(self):
+ self._test_list_resource_provider_inventories(bytes_body=True)
+
+ def _test_list_resource_provider_aggregates(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.list_resource_provider_aggregates,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_RESOURCE_PROVIDER_AGGREGATES,
+ to_utf=bytes_body,
+ status=200,
+ rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID
+ )
+
+ def test_list_resource_provider_aggregates_with_str_body(self):
+ self._test_list_resource_provider_aggregates()
+
+ def test_list_resource_provider_aggregates_with_bytes_body(self):
+ self._test_list_resource_provider_aggregates(bytes_body=True)
diff --git a/tempest/tests/lib/services/volume/v3/test_types_client.py b/tempest/tests/lib/services/volume/v3/test_types_client.py
index 336aa32..19d6591 100644
--- a/tempest/tests/lib/services/volume/v3/test_types_client.py
+++ b/tempest/tests/lib/services/volume/v3/test_types_client.py
@@ -121,6 +121,13 @@
to_utf=bytes_body,
volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff")
+ def _test_show_default_volume_type(self, bytes_body=False):
+ self.check_service_client_function(
+ self.client.show_default_volume_type,
+ 'tempest.lib.common.rest_client.RestClient.get',
+ self.FAKE_DEFAULT_VOLUME_TYPE_INFO,
+ to_utf=bytes_body)
+
def _test_create_volume_type(self, bytes_body=False):
self.check_service_client_function(
self.client.create_volume_type,
@@ -224,6 +231,12 @@
def test_show_volume_type_with_bytes_body(self):
self._test_show_volume_type(bytes_body=True)
+ def test_show_default_volume_type_with_str_body(self):
+ self._test_show_default_volume_type()
+
+ def test_show_default_volume_type_with_bytes_body(self):
+ self._test_show_default_volume_type(bytes_body=True)
+
def test_create_volume_type_str_body(self):
self._test_create_volume_type()
diff --git a/tools/tempest-integrated-gate-networking-blacklist.txt b/tools/tempest-integrated-gate-networking-blacklist.txt
index 97808d9..263b2e4 100644
--- a/tools/tempest-integrated-gate-networking-blacklist.txt
+++ b/tools/tempest-integrated-gate-networking-blacklist.txt
@@ -17,8 +17,3 @@
tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks
tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_image_defined_boot_from_volume
tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_create_server_from_volume_snapshot
-
-# TODO(gmann): Remove these test skip once nova bug #1882521 is solved
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume
-tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON
-tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached
diff --git a/tools/tempest-integrated-gate-placement-blacklist.txt b/tools/tempest-integrated-gate-placement-blacklist.txt
index 657bda2..efba796 100644
--- a/tools/tempest-integrated-gate-placement-blacklist.txt
+++ b/tools/tempest-integrated-gate-placement-blacklist.txt
@@ -17,8 +17,3 @@
tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted_volume_luks
tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_image_defined_boot_from_volume
tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_create_server_from_volume_snapshot
-
-# TODO(gmann): Remove these test skip once nova bug #1882521 is solved
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume
-tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON
-tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached
diff --git a/tools/tempest-integrated-gate-storage-blacklist.txt b/tools/tempest-integrated-gate-storage-blacklist.txt
index cbd3e9d..1ef6bb5 100644
--- a/tools/tempest-integrated-gate-storage-blacklist.txt
+++ b/tools/tempest-integrated-gate-storage-blacklist.txt
@@ -12,8 +12,3 @@
tempest.scenario.test_network_basic_ops.TestNetworkBasicOps
tempest.scenario.test_network_v6.TestGettingAddress
tempest.scenario.test_security_groups_basic_ops.TestSecurityGroupsBasicOps
-
-# TODO(gmann): Remove these test skip once nova bug #1882521 is solved
-tempest.api.compute.volumes.test_attach_volume.AttachVolumeMultiAttachTest.test_resize_server_with_multiattached_volume
-tempest.api.compute.servers.test_server_rescue_negative.ServerRescueNegativeTestJSON
-tempest.api.compute.servers.test_server_rescue.ServerStableDeviceRescueTest.test_stable_device_rescue_disk_virtio_with_volume_attached
diff --git a/tox.ini b/tox.ini
index 031a400..d8e059a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,7 +23,7 @@
OS_STDERR_CAPTURE=1
OS_TEST_TIMEOUT=160
PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site
-passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST GABBI_TEMPEST_PATH
+passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
usedevelop = True
install_command = pip install {opts} {packages}
whitelist_externals = *
@@ -279,7 +279,6 @@
[testenv:docs]
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/requirements.txt
-r{toxinidir}/doc/requirements.txt
commands =
sphinx-apidoc -f -o doc/source/tests/compute tempest/api/compute
@@ -365,7 +364,6 @@
[testenv:releasenotes]
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/requirements.txt
-r{toxinidir}/doc/requirements.txt
commands =
rm -rf releasenotes/build