Merge "Sahara: add API tests for cluster templates"
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index 4a567e7..a744339 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -211,13 +211,14 @@
# admin credentials are known. (boolean value)
#allow_tenant_isolation=false
-# Valid primary image reference to be used in tests. (string
-# value)
-#image_ref={$IMAGE_ID}
+# Valid primary image reference to be used in tests. This is a
+# required option (string value)
+#image_ref=<None>
-# Valid secondary image reference to be used in tests. (string
-# value)
-#image_ref_alt={$IMAGE_ID_ALT}
+# Valid secondary image reference to be used in tests. This is
+# a required option, but if only one image is available
+# duplicate the value of image_ref above (string value)
+#image_ref_alt=<None>
# Valid primary flavor to use in tests. (string value)
#flavor_ref=1
@@ -241,7 +242,7 @@
#image_alt_ssh_password=password
# Time in seconds between build status checks. (integer value)
-#build_interval=10
+#build_interval=1
# Timeout in seconds to wait for an instance to build.
# (integer value)
@@ -332,6 +333,9 @@
# admin credentials are known. (boolean value)
#allow_tenant_isolation=false
+# Time in seconds between build status checks. (integer value)
+#build_interval=1
+
[compute-admin]
@@ -350,6 +354,10 @@
# API key to use when authenticating as admin. (string value)
#password=<None>
+# Domain name for authentication as admin (Keystone V3). The
+# same domain applies to user and project (string value)
+#domain_name=<None>
+
[compute-feature-enabled]
@@ -401,6 +409,14 @@
# as [nova.vnc]->vnc_enabled in nova.conf (boolean value)
#vnc_console=false
+# Enable Spice console. This configuration value should be
+# the same as [nova.spice]->enabled in nova.conf (boolean value)
+#spice_console=false
+
+# Enable RDP console. This configuration value should be the same
+# as [nova.rdp]->enabled in nova.conf (boolean value)
+#rdp_console=false
+
[dashboard]
@@ -442,6 +458,10 @@
# value)
#db_flavor_ref=1
+# Current database version to use in database tests. (string
+# value)
+#db_current_version=v1.0
+
[debug]
@@ -516,6 +536,10 @@
# API key to use when authenticating. (string value)
#password=<None>
+# Domain name for authentication (Keystone V3). The same domain
+# applies to user and project (string value)
+#domain_name=<None>
+
# Username of alternate user to use for Nova API requests.
# (string value)
#alt_username=<None>
@@ -528,6 +552,10 @@
# (string value)
#alt_password=<None>
+# Alternate domain name for authentication (Keystone V3). The
+# same domain applies to user and project (string value)
+#alt_domain_name=<None>
+
# Administrative Username to use for Keystone API requests.
# (string value)
#admin_username=<None>
@@ -539,6 +567,10 @@
# API key to use when authenticating as admin. (string value)
#admin_password=<None>
+# Admin domain name for authentication (Keystone V3). The same
+# domain applies to user and project (string value)
+#admin_domain_name=<None>
+
[identity-feature-enabled]
@@ -677,7 +709,7 @@
# Time in seconds between network operation status checks.
# (integer value)
-#build_interval=10
+#build_interval=1
[network-feature-enabled]
@@ -760,9 +792,6 @@
# (string value)
#endpoint_type=publicURL
-# Time in seconds between build status checks. (integer value)
-#build_interval=1
-
# Timeout in seconds to wait for a stack to build. (integer
# value)
#build_timeout=1200
@@ -797,6 +826,10 @@
# Catalog type of the Queuing service. (string value)
#catalog_type=queuing
+# The maximum number of queue records per page when listing
+# queues (integer value)
+#max_queues_per_page=20
+
[scenario]
@@ -943,6 +976,10 @@
# value)
#endpoint_type=publicURL
+# This variable is used as a flag to enable notification tests
+# (boolean value)
+#too_slow_to_test=true
+
[volume]
@@ -952,7 +989,7 @@
# Time in seconds between volume availability checks. (integer
# value)
-#build_interval=10
+#build_interval=1
# Timeout in seconds to wait for a volume to become available.
# (integer value)
diff --git a/requirements.txt b/requirements.txt
index e97eece..f907e7d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,22 +5,21 @@
testtools>=0.9.34
lxml>=2.3
boto>=2.12.0,!=2.13.0
-paramiko>=1.9.0
+paramiko>=1.13.0
netaddr>=0.7.6
python-glanceclient>=0.9.0
-python-keystoneclient>=0.7.0
+python-keystoneclient>=0.8.0
python-novaclient>=2.17.0
python-neutronclient>=2.3.4,<3
python-cinderclient>=1.0.6
-python-heatclient>=0.2.3
+python-heatclient>=0.2.9
python-ironicclient
python-saharaclient>=0.6.0
-python-swiftclient>=1.6
+python-swiftclient>=2.0.2
testresources>=0.2.4
-keyring>=2.1
testrepository>=0.0.18
oslo.config>=1.2.0
-six>=1.5.2
+six>=1.6.0
iso8601>=0.1.9
fixtures>=0.3.14
testscenarios>=0.4
diff --git a/setup.cfg b/setup.cfg
index a701572..5c62710 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = tempest
-version = 2014.1
+version = 2
summary = OpenStack Integration Testing
description-file =
README.rst
@@ -17,6 +17,12 @@
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
+[entry_points]
+console_scripts =
+ verify-tempest-config = tempest.cmd.verify_tempest_config:main
+ javelin2 = tempest.cmd.javelin:main
+ run-tempest-stress = tempest.cmd.run_stress:main
+
[build_sphinx]
all_files = 1
build-dir = doc/build
diff --git a/setup.py b/setup.py
index 70c2b3f..7363757 100755
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,14 @@
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa
+except ImportError:
+ pass
+
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
diff --git a/tempest/api/baremetal/test_ports.py b/tempest/api/baremetal/test_ports.py
index 8b76811..aeb77e3 100644
--- a/tempest/api/baremetal/test_ports.py
+++ b/tempest/api/baremetal/test_ports.py
@@ -165,8 +165,8 @@
resp, body = self.client.list_ports_detail()
self.assertEqual(200, resp.status)
- ports_dict = {port['uuid']: port for port in body['ports']
- if port['uuid'] in uuids}
+ ports_dict = dict((port['uuid'], port) for port in body['ports']
+ if port['uuid'] in uuids)
for uuid in uuids:
self.assertIn(uuid, ports_dict)
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 3c06624..9555367 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -17,15 +17,15 @@
from tempest import test
-class AZAdminTestJSON(base.BaseV2ComputeAdminTest):
-
+class AZAdminV3Test(base.BaseComputeAdminTest):
"""
Tests Availability Zone API List
"""
+ _api_version = 3
@classmethod
def setUpClass(cls):
- super(AZAdminTestJSON, cls).setUpClass()
+ super(AZAdminV3Test, cls).setUpClass()
cls.client = cls.os_adm.availability_zone_client
@test.attr(type='gate')
@@ -44,5 +44,9 @@
self.assertTrue(len(availability_zone) > 0)
-class AZAdminTestXML(AZAdminTestJSON):
+class AZAdminV2TestJSON(AZAdminV3Test):
+ _api_version = 2
+
+
+class AZAdminV2TestXML(AZAdminV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index 193d415..3ba7314 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -32,15 +32,12 @@
msg = "FlavorExtraData extension not enabled."
raise cls.skipException(msg)
+ # Compute admin flavor client
cls.client = cls.os_adm.flavors_client
- admin_client = cls._get_identity_admin_client()
- cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
- tenant_name)
- cls.tenant_id = cls.tenant['id']
- cls.adm_tenant = admin_client.get_tenant_by_name(cls.os_adm.
- flavors_client.
- tenant_name)
- cls.adm_tenant_id = cls.adm_tenant['id']
+ # Non admin tenant ID
+ cls.tenant_id = cls.flavors_client.tenant_id
+ # Compute admin tenant ID
+ cls.adm_tenant_id = cls.client.tenant_id
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
diff --git a/tempest/api/compute/admin/test_flavors_access_negative.py b/tempest/api/compute/admin/test_flavors_access_negative.py
index 8fe3331..73834e9 100644
--- a/tempest/api/compute/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/admin/test_flavors_access_negative.py
@@ -36,14 +36,7 @@
raise cls.skipException(msg)
cls.client = cls.os_adm.flavors_client
- admin_client = cls._get_identity_admin_client()
- cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
- tenant_name)
- cls.tenant_id = cls.tenant['id']
- cls.adm_tenant = admin_client.get_tenant_by_name(cls.os_adm.
- flavors_client.
- tenant_name)
- cls.adm_tenant_id = cls.adm_tenant['id']
+ cls.tenant_id = cls.flavors_client.tenant_id
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 32e0478..348666d 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -28,8 +28,7 @@
# NOTE(afazekas): these test cases should always create and use a new
# tenant most of them should be skipped if we can't do that
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.quotas_client.tenant_id
cls.default_quota_set = set(('injected_file_content_bytes',
'metadata_items', 'injected_files',
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index 5b2b5fd..e1dc685 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -33,8 +33,7 @@
# NOTE(afazekas): these test cases should always create and use a new
# tenant most of them should be skipped if we can't do that
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.client.tenant_id
@test.attr(type=['negative', 'gate'])
def test_update_quota_normal_user(self):
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index 9fa07f6..8b3a0b5 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -37,10 +37,7 @@
cls.client = cls.os_adm.servers_client
cls.non_adm_client = cls.servers_client
cls.flavors_client = cls.os_adm.flavors_client
- cls.identity_client = cls._get_identity_admin_client()
- tenant = cls.identity_client.get_tenant_by_name(
- cls.client.tenant_name)
- cls.tenant_id = tenant['id']
+ cls.tenant_id = cls.client.tenant_id
cls.s1_name = data_utils.rand_name('server')
resp, server = cls.create_test_server(name=cls.s1_name,
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage.py b/tempest/api/compute/admin/test_simple_tenant_usage.py
index 33cd6f3..f3a81d1 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage.py
@@ -27,11 +27,7 @@
super(TenantUsagesTestJSON, cls).setUpClass()
cls.adm_client = cls.os_adm.tenant_usages_client
cls.client = cls.os.tenant_usages_client
- cls.identity_client = cls._get_identity_admin_client()
-
- resp, tenants = cls.identity_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
# Create a server in the demo tenant
resp, server = cls.create_test_server(wait_until='ACTIVE')
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
index a080f2e..d69c43c 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage_negative.py
@@ -52,11 +52,9 @@
params = {'start': self.end,
'end': self.start}
resp, tenants = self.identity_client.list_tenants()
- tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- self.client.tenant_name][0]
self.assertRaises(exceptions.BadRequest,
self.adm_client.get_tenant_usage,
- tenant_id, params)
+ self.client.tenant_id, params)
@test.attr(type=['negative', 'gate'])
def test_list_usage_all_tenants_with_non_admin_user(self):
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 7a3798c..7c70aec 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -30,12 +30,16 @@
class BaseComputeTest(tempest.test.BaseTestCase):
"""Base test case class for all Compute API tests."""
+ _api_version = 3
force_tenant_isolation = False
@classmethod
def setUpClass(cls):
+ cls.set_network_resources()
super(BaseComputeTest, cls).setUpClass()
+        # TODO(andreaf) We should also take care of the alt_manager here
+ # but only once client lazy load in the manager is done
os = cls.get_client_manager()
cls.os = os
@@ -52,6 +56,58 @@
cls.images = []
cls.multi_user = cls.get_multi_user()
cls.security_groups = []
+ cls.server_groups = []
+
+ if cls._api_version == 2:
+ cls.servers_client = cls.os.servers_client
+ cls.flavors_client = cls.os.flavors_client
+ cls.images_client = cls.os.images_client
+ cls.extensions_client = cls.os.extensions_client
+ cls.floating_ips_client = cls.os.floating_ips_client
+ cls.keypairs_client = cls.os.keypairs_client
+ cls.security_groups_client = cls.os.security_groups_client
+ cls.quotas_client = cls.os.quotas_client
+ cls.limits_client = cls.os.limits_client
+ cls.volumes_extensions_client = cls.os.volumes_extensions_client
+ cls.volumes_client = cls.os.volumes_client
+ cls.interfaces_client = cls.os.interfaces_client
+ cls.fixed_ips_client = cls.os.fixed_ips_client
+ cls.availability_zone_client = cls.os.availability_zone_client
+ cls.agents_client = cls.os.agents_client
+ cls.aggregates_client = cls.os.aggregates_client
+ cls.services_client = cls.os.services_client
+ cls.instance_usages_audit_log_client = \
+ cls.os.instance_usages_audit_log_client
+ cls.hypervisor_client = cls.os.hypervisor_client
+ cls.certificates_client = cls.os.certificates_client
+ cls.migrations_client = cls.os.migrations_client
+
+ elif cls._api_version == 3:
+ if not CONF.compute_feature_enabled.api_v3:
+ skip_msg = ("%s skipped as nova v3 api is not available" %
+ cls.__name__)
+ raise cls.skipException(skip_msg)
+ cls.servers_client = cls.os.servers_v3_client
+ cls.images_client = cls.os.image_client
+ cls.flavors_client = cls.os.flavors_v3_client
+ cls.services_client = cls.os.services_v3_client
+ cls.extensions_client = cls.os.extensions_v3_client
+ cls.availability_zone_client = cls.os.availability_zone_v3_client
+ cls.interfaces_client = cls.os.interfaces_v3_client
+ cls.hypervisor_client = cls.os.hypervisor_v3_client
+ cls.keypairs_client = cls.os.keypairs_v3_client
+ cls.volumes_client = cls.os.volumes_client
+ cls.certificates_client = cls.os.certificates_v3_client
+ cls.keypairs_client = cls.os.keypairs_v3_client
+ cls.aggregates_client = cls.os.aggregates_v3_client
+ cls.hosts_client = cls.os.hosts_v3_client
+ cls.quotas_client = cls.os.quotas_v3_client
+ cls.version_client = cls.os.version_v3_client
+ cls.migrations_client = cls.os.migrations_v3_client
+ else:
+ msg = ("Unexpected API version is specified (%s)" %
+ cls._api_version)
+ raise exceptions.InvalidConfiguration(message=msg)
@classmethod
def get_multi_user(cls):
@@ -91,6 +147,26 @@
pass
@classmethod
+ def server_check_teardown(cls):
+        """Checks if the shared server is clean enough for a subsequent test.
+           Deletes the server when it is dirty.
+           The setUp method is responsible for creating a new server.
+           Exceptions raised in tearDown fail the test case.
+           This method is supposed to be used only by tearDown methods, when
+           the shared server_id is stored in the server_id of the class.
+ """
+ if getattr(cls, 'server_id', None) is not None:
+ try:
+ cls.servers_client.wait_for_server_status(cls.server_id,
+ 'ACTIVE')
+ except Exception as exc:
+ LOG.exception(exc)
+ cls.servers_client.delete_server(cls.server_id)
+ cls.servers_client.wait_for_server_termination(cls.server_id)
+ cls.server_id = None
+ raise
+
+ @classmethod
def clear_images(cls):
for image_id in cls.images:
try:
@@ -100,7 +176,6 @@
pass
except Exception:
LOG.exception('Exception raised deleting image %s' % image_id)
- pass
@classmethod
def clear_security_groups(cls):
@@ -115,7 +190,18 @@
LOG.info('Exception raised deleting security group %s',
sg['id'])
LOG.exception(exc)
+
+ @classmethod
+ def clear_server_groups(cls):
+ for server_group_id in cls.server_groups:
+ try:
+ cls.client.delete_server_group(server_group_id)
+ except exceptions.NotFound:
+ # The server-group may have already been deleted which is OK.
pass
+ except Exception:
+ LOG.exception('Exception raised deleting server-group %s',
+ server_group_id)
@classmethod
def tearDownClass(cls):
@@ -123,6 +209,7 @@
cls.clear_servers()
cls.clear_security_groups()
cls.clear_isolated_creds()
+ cls.clear_server_groups()
super(BaseComputeTest, cls).tearDownClass()
@classmethod
@@ -176,6 +263,16 @@
return resp, body
+ @classmethod
+ def create_test_server_group(cls, name="", policy=[]):
+ if not name:
+ name = data_utils.rand_name(cls.__name__ + "-Server-Group")
+ if not policy:
+ policy = ['affinity']
+ resp, body = cls.servers_client.create_server_group(name, policy)
+ cls.server_groups.append(body)
+ return resp, body
+
def wait_for(self, condition):
"""Repeatedly calls condition() until a timeout."""
start_time = int(time.time())
@@ -210,39 +307,6 @@
cls.set_network_resources(network=True, subnet=True, router=True,
dhcp=True)
-
-class BaseV2ComputeTest(BaseComputeTest):
-
- _interface = "json"
-
- @classmethod
- def setUpClass(cls):
- # By default compute tests do not create network resources
- cls.set_network_resources()
- super(BaseV2ComputeTest, cls).setUpClass()
- cls.servers_client = cls.os.servers_client
- cls.flavors_client = cls.os.flavors_client
- cls.images_client = cls.os.images_client
- cls.extensions_client = cls.os.extensions_client
- cls.floating_ips_client = cls.os.floating_ips_client
- cls.keypairs_client = cls.os.keypairs_client
- cls.security_groups_client = cls.os.security_groups_client
- cls.quotas_client = cls.os.quotas_client
- cls.limits_client = cls.os.limits_client
- cls.volumes_extensions_client = cls.os.volumes_extensions_client
- cls.volumes_client = cls.os.volumes_client
- cls.interfaces_client = cls.os.interfaces_client
- cls.fixed_ips_client = cls.os.fixed_ips_client
- cls.availability_zone_client = cls.os.availability_zone_client
- cls.agents_client = cls.os.agents_client
- cls.aggregates_client = cls.os.aggregates_client
- cls.services_client = cls.os.services_client
- cls.instance_usages_audit_log_client = \
- cls.os.instance_usages_audit_log_client
- cls.hypervisor_client = cls.os.hypervisor_client
- cls.certificates_client = cls.os.certificates_client
- cls.migrations_client = cls.os.migrations_client
-
@classmethod
def create_image_from_server(cls, server_id, **kwargs):
"""Wrapper utility that returns an image created from the server."""
@@ -250,21 +314,25 @@
if 'name' in kwargs:
name = kwargs.pop('name')
- resp, image = cls.images_client.create_image(
- server_id, name)
+ if cls._api_version == 2:
+ resp, image = cls.images_client.create_image(server_id, name)
+ elif cls._api_version == 3:
+ resp, image = cls.servers_client.create_image(server_id, name)
image_id = data_utils.parse_image_id(resp['location'])
cls.images.append(image_id)
if 'wait_until' in kwargs:
cls.images_client.wait_for_image_status(image_id,
kwargs['wait_until'])
- resp, image = cls.images_client.get_image(image_id)
+ if cls._api_version == 2:
+ resp, image = cls.images_client.get_image(image_id)
+ elif cls._api_version == 3:
+ resp, image = cls.images_client.get_image_meta(image_id)
if kwargs['wait_until'] == 'ACTIVE':
if kwargs.get('wait_for_server', True):
cls.servers_client.wait_for_server_status(server_id,
'ACTIVE')
-
return resp, image
@classmethod
@@ -276,150 +344,72 @@
cls.servers_client.wait_for_server_termination(server_id)
except Exception:
LOG.exception('Failed to delete server %s' % server_id)
- pass
resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
- cls.password = server['adminPass']
+ if cls._api_version == 2:
+ cls.password = server['adminPass']
+ elif cls._api_version == 3:
+ cls.password = server['admin_password']
return server['id']
@classmethod
def delete_volume(cls, volume_id):
"""Deletes the given volume and waits for it to be gone."""
- cls._delete_volume(cls.volumes_extensions_client, volume_id)
+ if cls._api_version == 2:
+ cls._delete_volume(cls.volumes_extensions_client, volume_id)
+ elif cls._api_version == 3:
+ cls._delete_volume(cls.volumes_client, volume_id)
-class BaseV2ComputeAdminTest(BaseV2ComputeTest):
- """Base test case class for Compute Admin V2 API tests."""
-
- @classmethod
- def setUpClass(cls):
- super(BaseV2ComputeAdminTest, cls).setUpClass()
- admin_username = CONF.compute_admin.username
- admin_password = CONF.compute_admin.password
- admin_tenant = CONF.compute_admin.tenant_name
- if not (admin_username and admin_password and admin_tenant):
- msg = ("Missing Compute Admin API credentials "
- "in configuration.")
- raise cls.skipException(msg)
- if (CONF.compute.allow_tenant_isolation or
- cls.force_tenant_isolation is True):
- creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = creds
- cls.os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
- interface=cls._interface)
- else:
- cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+class BaseV2ComputeTest(BaseComputeTest):
+ _api_version = 2
+ _interface = "json"
class BaseV3ComputeTest(BaseComputeTest):
+ _api_version = 3
+ _interface = "json"
+
+class BaseComputeAdminTest(BaseComputeTest):
+ """Base test case class for Compute Admin API tests."""
_interface = "json"
@classmethod
def setUpClass(cls):
- # By default compute tests do not create network resources
- if cls._interface == "xml":
- skip_msg = ("XML interface is being removed from Nova v3. "
- "%s will be removed shortly" % cls.__name__)
- raise cls.skipException(skip_msg)
-
- if not CONF.compute_feature_enabled.api_v3:
- skip_msg = ("%s skipped as nova v3 api is not available" %
- cls.__name__)
- raise cls.skipException(skip_msg)
-
- cls.set_network_resources()
- super(BaseV3ComputeTest, cls).setUpClass()
-
- cls.servers_client = cls.os.servers_v3_client
- cls.images_client = cls.os.image_client
- cls.flavors_client = cls.os.flavors_v3_client
- cls.services_client = cls.os.services_v3_client
- cls.extensions_client = cls.os.extensions_v3_client
- cls.availability_zone_client = cls.os.availability_zone_v3_client
- cls.interfaces_client = cls.os.interfaces_v3_client
- cls.hypervisor_client = cls.os.hypervisor_v3_client
- cls.keypairs_client = cls.os.keypairs_v3_client
- cls.volumes_client = cls.os.volumes_client
- cls.certificates_client = cls.os.certificates_v3_client
- cls.keypairs_client = cls.os.keypairs_v3_client
- cls.aggregates_client = cls.os.aggregates_v3_client
- cls.hosts_client = cls.os.hosts_v3_client
- cls.quotas_client = cls.os.quotas_v3_client
- cls.version_client = cls.os.version_v3_client
- cls.migrations_client = cls.os.migrations_v3_client
-
- @classmethod
- def create_image_from_server(cls, server_id, **kwargs):
- """Wrapper utility that returns an image created from the server."""
- name = data_utils.rand_name(cls.__name__ + "-image")
- if 'name' in kwargs:
- name = kwargs.pop('name')
-
- resp, image = cls.servers_client.create_image(
- server_id, name)
- image_id = data_utils.parse_image_id(resp['location'])
- cls.images.append(image_id)
-
- if 'wait_until' in kwargs:
- cls.images_client.wait_for_image_status(image_id,
- kwargs['wait_until'])
- resp, image = cls.images_client.get_image_meta(image_id)
-
- return resp, image
-
- @classmethod
- def rebuild_server(cls, server_id, **kwargs):
- # Destroy an existing server and creates a new one
- try:
- cls.servers_client.delete_server(server_id)
- cls.servers_client.wait_for_server_termination(server_id)
- except Exception:
- LOG.exception('Failed to delete server %s' % server_id)
- pass
- resp, server = cls.create_test_server(wait_until='ACTIVE', **kwargs)
- cls.password = server['admin_password']
- return server['id']
-
- @classmethod
- def delete_volume(cls, volume_id):
- """Deletes the given volume and waits for it to be gone."""
- cls._delete_volume(cls.volumes_client, volume_id)
-
-
-class BaseV3ComputeAdminTest(BaseV3ComputeTest):
- """Base test case class for all Compute Admin API V3 tests."""
-
- @classmethod
- def setUpClass(cls):
- super(BaseV3ComputeAdminTest, cls).setUpClass()
- admin_username = CONF.compute_admin.username
- admin_password = CONF.compute_admin.password
- admin_tenant = CONF.compute_admin.tenant_name
- if not (admin_username and admin_password and admin_tenant):
- msg = ("Missing Compute Admin API credentials "
- "in configuration.")
- raise cls.skipException(msg)
- if CONF.compute.allow_tenant_isolation:
+ super(BaseComputeAdminTest, cls).setUpClass()
+ if (CONF.compute.allow_tenant_isolation or
+ cls.force_tenant_isolation is True):
creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = creds
- os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
- interface=cls._interface)
+ cls.os_adm = clients.Manager(credentials=creds,
+ interface=cls._interface)
else:
- os_adm = clients.ComputeAdminManager(interface=cls._interface)
+ try:
+ cls.os_adm = clients.ComputeAdminManager(
+ interface=cls._interface)
+ except exceptions.InvalidCredentials:
+ msg = ("Missing Compute Admin API credentials "
+ "in configuration.")
+ raise cls.skipException(msg)
- cls.os_adm = os_adm
- cls.servers_admin_client = cls.os_adm.servers_v3_client
- cls.services_admin_client = cls.os_adm.services_v3_client
- cls.availability_zone_admin_client = \
- cls.os_adm.availability_zone_v3_client
- cls.hypervisor_admin_client = cls.os_adm.hypervisor_v3_client
- cls.flavors_admin_client = cls.os_adm.flavors_v3_client
- cls.aggregates_admin_client = cls.os_adm.aggregates_v3_client
- cls.hosts_admin_client = cls.os_adm.hosts_v3_client
- cls.quotas_admin_client = cls.os_adm.quotas_v3_client
- cls.agents_admin_client = cls.os_adm.agents_v3_client
- cls.migrations_admin_client = cls.os_adm.migrations_v3_client
+ if cls._api_version == 3:
+ cls.servers_admin_client = cls.os_adm.servers_v3_client
+ cls.services_admin_client = cls.os_adm.services_v3_client
+ cls.availability_zone_admin_client = \
+ cls.os_adm.availability_zone_v3_client
+ cls.hypervisor_admin_client = cls.os_adm.hypervisor_v3_client
+ cls.flavors_admin_client = cls.os_adm.flavors_v3_client
+ cls.aggregates_admin_client = cls.os_adm.aggregates_v3_client
+ cls.hosts_admin_client = cls.os_adm.hosts_v3_client
+ cls.quotas_admin_client = cls.os_adm.quotas_v3_client
+ cls.agents_admin_client = cls.os_adm.agents_v3_client
+ cls.migrations_admin_client = cls.os_adm.migrations_v3_client
+
+
+class BaseV2ComputeAdminTest(BaseComputeAdminTest):
+ """Base test case class for Compute Admin V2 API tests."""
+ _api_version = 2
+
+
+class BaseV3ComputeAdminTest(BaseComputeAdminTest):
+ """Base test case class for Compute Admin V3 API tests."""
+ _api_version = 3
diff --git a/tempest/api/compute/certificates/test_certificates.py b/tempest/api/compute/certificates/test_certificates.py
index f6cadf7..0f921c5 100644
--- a/tempest/api/compute/certificates/test_certificates.py
+++ b/tempest/api/compute/certificates/test_certificates.py
@@ -17,13 +17,14 @@
from tempest import test
-class CertificatesTestJSON(base.BaseV2ComputeTest):
+class CertificatesV3Test(base.BaseComputeTest):
+
+ _api_version = 3
@test.attr(type='gate')
def test_create_root_certificate(self):
# create certificates
resp, body = self.certificates_client.create_certificate()
- self.assertEqual(200, resp.status)
self.assertIn('data', body)
self.assertIn('private_key', body)
@@ -36,5 +37,9 @@
self.assertIn('private_key', body)
-class CertificatesTestXML(CertificatesTestJSON):
+class CertificatesV2TestJSON(CertificatesV3Test):
+ _api_version = 2
+
+
+class CertificatesV2TestXML(CertificatesV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/flavors/test_flavors.py b/tempest/api/compute/flavors/test_flavors.py
index 6e202f6..bfebb5e 100644
--- a/tempest/api/compute/flavors/test_flavors.py
+++ b/tempest/api/compute/flavors/test_flavors.py
@@ -17,11 +17,15 @@
from tempest import test
-class FlavorsTestJSON(base.BaseV2ComputeTest):
+class FlavorsV3Test(base.BaseComputeTest):
+
+ _api_version = 3
+ _min_disk = 'min_disk'
+ _min_ram = 'min_ram'
@classmethod
def setUpClass(cls):
- super(FlavorsTestJSON, cls).setUpClass()
+ super(FlavorsV3Test, cls).setUpClass()
cls.client = cls.flavors_client
@test.attr(type='smoke')
@@ -89,7 +93,7 @@
flavors = sorted(flavors, key=lambda k: k['disk'])
flavor_id = flavors[0]['id']
- params = {'minDisk': flavors[0]['disk'] + 1}
+ params = {self._min_disk: flavors[0]['disk'] + 1}
resp, flavors = self.client.list_flavors_with_detail(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
@@ -100,7 +104,7 @@
flavors = sorted(flavors, key=lambda k: k['ram'])
flavor_id = flavors[0]['id']
- params = {'minRam': flavors[0]['ram'] + 1}
+ params = {self._min_ram: flavors[0]['ram'] + 1}
resp, flavors = self.client.list_flavors_with_detail(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
@@ -111,7 +115,7 @@
flavors = sorted(flavors, key=lambda k: k['disk'])
flavor_id = flavors[0]['id']
- params = {'minDisk': flavors[0]['disk'] + 1}
+ params = {self._min_disk: flavors[0]['disk'] + 1}
resp, flavors = self.client.list_flavors(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
@@ -122,10 +126,17 @@
flavors = sorted(flavors, key=lambda k: k['ram'])
flavor_id = flavors[0]['id']
- params = {'minRam': flavors[0]['ram'] + 1}
+ params = {self._min_ram: flavors[0]['ram'] + 1}
resp, flavors = self.client.list_flavors(params)
self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-class FlavorsTestXML(FlavorsTestJSON):
+class FlavorsV2TestJSON(FlavorsV3Test):
+
+ _api_version = 2
+ _min_disk = 'minDisk'
+ _min_ram = 'minRam'
+
+
+class FlavorsV2TestXML(FlavorsV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index d2fd970..c81cec5 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -26,6 +26,11 @@
class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
+ def tearDown(self):
+ """Terminate test instances created after a test is executed."""
+ self.server_check_teardown()
+ super(ImagesOneServerTestJSON, self).tearDown()
+
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 41a0590..9c4ab00 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
for image_id in self.image_ids:
self.client.delete_image(image_id)
self.image_ids.remove(image_id)
+ self.server_check_teardown()
super(ImagesOneServerNegativeTestJSON, self).tearDown()
def setUp(self):
diff --git a/tempest/api/compute/keypairs/test_keypairs.py b/tempest/api/compute/keypairs/test_keypairs.py
index 67fafed..01979c0 100644
--- a/tempest/api/compute/keypairs/test_keypairs.py
+++ b/tempest/api/compute/keypairs/test_keypairs.py
@@ -18,16 +18,17 @@
from tempest import test
-class KeyPairsTestJSON(base.BaseV2ComputeTest):
+class KeyPairsV3Test(base.BaseComputeTest):
+
+ _api_version = 3
@classmethod
def setUpClass(cls):
- super(KeyPairsTestJSON, cls).setUpClass()
+ super(KeyPairsV3Test, cls).setUpClass()
cls.client = cls.keypairs_client
def _delete_keypair(self, keypair_name):
resp, _ = self.client.delete_keypair(keypair_name)
- self.assertEqual(202, resp.status)
def _create_keypair(self, keypair_name, pub_key=None):
resp, body = self.client.create_keypair(keypair_name, pub_key)
@@ -46,7 +47,6 @@
# as the keypair dicts from list API doesn't have them.
keypair.pop('private_key')
keypair.pop('user_id')
- self.assertEqual(200, resp.status)
key_list.append(keypair)
# Fetch all keypairs and verify the list
# has all created keypairs
@@ -69,7 +69,6 @@
# Keypair should be created, verified and deleted
k_name = data_utils.rand_name('keypair-')
resp, keypair = self._create_keypair(k_name)
- self.assertEqual(200, resp.status)
private_key = keypair['private_key']
key_name = keypair['name']
self.assertEqual(key_name, k_name,
@@ -108,7 +107,6 @@
"XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
"snSA8wzBx3A/8y9Pp1B nova@ubuntu")
resp, keypair = self._create_keypair(k_name, pub_key)
- self.assertEqual(200, resp.status)
self.assertFalse('private_key' in keypair,
"Field private_key is not empty!")
key_name = keypair['name']
@@ -117,5 +115,9 @@
"to the requested name!")
-class KeyPairsTestXML(KeyPairsTestJSON):
+class KeyPairsV2TestJSON(KeyPairsV3Test):
+ _api_version = 2
+
+
+class KeyPairsV2TestXML(KeyPairsV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index b04ab8a..35f6fc2 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -75,7 +75,6 @@
to_port,
cidr=cidr,
group_id=group_id)
- self.addCleanup(self.client.delete_security_group_rule, rule['id'])
self.assertEqual(200, resp.status)
@test.attr(type='smoke')
@@ -95,8 +94,6 @@
ip_protocol1,
from_port1, to_port1)
rule1_id = rule['id']
- # Delete the Security Group rule1 at the end of this method
- self.addCleanup(self.client.delete_security_group_rule, rule1_id)
# Add a second rule to the created Security Group
ip_protocol2 = 'icmp'
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 3736f28..a077943 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -30,29 +30,31 @@
def test_security_groups_create_list_delete(self):
# Positive test:Should return the list of Security Groups
# Create 3 Security Groups
+ security_group_list = []
for i in range(3):
- resp, securitygroup = self.create_security_group()
+ resp, body = self.create_security_group()
self.assertEqual(200, resp.status)
+ security_group_list.append(body)
# Fetch all Security Groups and verify the list
# has all created Security Groups
resp, fetched_list = self.client.list_security_groups()
self.assertEqual(200, resp.status)
# Now check if all the created Security Groups are in fetched list
missing_sgs = \
- [sg for sg in self.security_groups if sg not in fetched_list]
+ [sg for sg in security_group_list if sg not in fetched_list]
self.assertFalse(missing_sgs,
"Failed to find Security Group %s in fetched "
"list" % ', '.join(m_group['name']
for m_group in missing_sgs))
# Delete all security groups
- for sg in self.security_groups:
+ for sg in security_group_list:
resp, _ = self.client.delete_security_group(sg['id'])
self.assertEqual(202, resp.status)
self.client.wait_for_resource_deletion(sg['id'])
# Now check if all the created Security Groups are deleted
resp, fetched_list = self.client.list_security_groups()
deleted_sgs = \
- [sg for sg in self.security_groups if sg in fetched_list]
+ [sg for sg in security_group_list if sg in fetched_list]
self.assertFalse(deleted_sgs,
"Failed to delete Security Group %s "
"list" % ', '.join(m_group['name']
@@ -78,6 +80,9 @@
self.assertEqual(securitygroup, fetched_group,
"The fetched Security Group is different "
"from the created Group")
+ resp, _ = self.client.delete_security_group(securitygroup['id'])
+ self.assertEqual(202, resp.status)
+ self.client.wait_for_resource_deletion(securitygroup['id'])
@test.attr(type='smoke')
def test_server_security_groups(self):
@@ -120,9 +125,9 @@
self.servers_client.delete_server(server_id)
self.servers_client.wait_for_server_termination(server_id)
- self.client.delete_security_group(sg['id'])
+ resp, _ = self.client.delete_security_group(sg['id'])
self.assertEqual(202, resp.status)
- self.client.delete_security_group(sg2['id'])
+ resp, _ = self.client.delete_security_group(sg2['id'])
self.assertEqual(202, resp.status)
@test.attr(type='smoke')
diff --git a/tempest/api/compute/servers/test_availability_zone.py b/tempest/api/compute/servers/test_availability_zone.py
index 7b12555..cf9837f 100644
--- a/tempest/api/compute/servers/test_availability_zone.py
+++ b/tempest/api/compute/servers/test_availability_zone.py
@@ -17,15 +17,15 @@
from tempest import test
-class AZTestJSON(base.BaseV2ComputeTest):
-
+class AZV3Test(base.BaseComputeTest):
"""
Tests Availability Zone API List
"""
+ _api_version = 3
@classmethod
def setUpClass(cls):
- super(AZTestJSON, cls).setUpClass()
+ super(AZV3Test, cls).setUpClass()
cls.client = cls.availability_zone_client
@test.attr(type='gate')
@@ -36,5 +36,9 @@
self.assertTrue(len(availability_zone) > 0)
-class AZTestXML(AZTestJSON):
+class AZV2TestJSON(AZV3Test):
+ _api_version = 2
+
+
+class AZV2TestXML(AZV2TestJSON):
_interface = 'xml'
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 9d6a1c1..e135eca 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -54,13 +54,6 @@
resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@test.attr(type='smoke')
- def test_create_server_response(self):
- # Check that the required fields are returned with values
- self.assertEqual(202, self.resp.status)
- self.assertTrue(self.server_initial['id'] is not None)
- self.assertTrue(self.server_initial['adminPass'] is not None)
-
- @test.attr(type='smoke')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
@@ -117,25 +110,8 @@
def setUpClass(cls):
cls.prepare_instance_network()
super(ServersWithSpecificFlavorTestJSON, cls).setUpClass()
- cls.meta = {'hello': 'world'}
- cls.accessIPv4 = '1.1.1.1'
- cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
- cls.name = data_utils.rand_name('server')
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
- cls.client = cls.servers_client
cls.flavor_client = cls.os_adm.flavors_client
- cli_resp = cls.create_test_server(name=cls.name,
- meta=cls.meta,
- accessIPv4=cls.accessIPv4,
- accessIPv6=cls.accessIPv6,
- personality=personality,
- disk_config=cls.disk_config)
- cls.resp, cls.server_initial = cli_resp
- cls.password = cls.server_initial['adminPass']
- cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
- resp, cls.server = cls.client.get_server(cls.server_initial['id'])
+ cls.client = cls.servers_client
@testtools.skipUnless(CONF.compute.run_ssh,
'Instance validation tests are disabled.')
@@ -143,7 +119,7 @@
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
- def create_flavor_with_extra_specs(self):
+ def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = 64
@@ -156,12 +132,12 @@
ram, vcpus, disk,
flavor_with_eph_disk_id,
ephemeral=1))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.addCleanup(flavor_clean_up, flavor['id'])
self.assertEqual(200, resp.status)
return flavor['id']
- def create_flavor_without_extra_specs(self):
+ def create_flavor_without_extra_specs():
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
@@ -174,18 +150,18 @@
create_flavor(flavor_no_eph_disk_name,
ram, vcpus, disk,
flavor_no_eph_disk_id))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
+ self.addCleanup(flavor_clean_up, flavor['id'])
self.assertEqual(200, resp.status)
return flavor['id']
- def flavor_clean_up(self, flavor_id):
+ def flavor_clean_up(flavor_id):
resp, body = self.flavor_client.delete_flavor(flavor_id)
self.assertEqual(resp.status, 202)
self.flavor_client.wait_for_resource_deletion(flavor_id)
- flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
- flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+ flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+ flavor_no_eph_disk_id = create_flavor_without_extra_specs()
admin_pass = self.image_ssh_password
@@ -198,13 +174,18 @@
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id))
# Get partition number of server without extra specs.
+ _, server_no_eph_disk = self.client.get_server(
+ server_no_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_no_eph_disk,
- self.ssh_user, self.password)
- partition_num = len(linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num = len(linux_client.get_partitions().split('\n'))
+ _, server_with_eph_disk = self.client.get_server(
+ server_with_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_with_eph_disk,
- self.ssh_user, self.password)
- self.assertEqual(partition_num + 1, linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num_emph = len(linux_client.get_partitions().split('\n'))
+ self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
diff --git a/tempest/api/compute/servers/test_list_servers_negative.py b/tempest/api/compute/servers/test_list_servers_negative.py
index 768cc11..28d64fb 100644
--- a/tempest/api/compute/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/servers/test_list_servers_negative.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import datetime
-
from six import moves
from tempest.api.compute import base
@@ -37,9 +35,8 @@
# tearDownClass method of the super-class.
cls.existing_fixtures = []
cls.deleted_fixtures = []
- cls.start_time = datetime.datetime.utcnow()
for x in moves.xrange(2):
- resp, srv = cls.create_test_server()
+ resp, srv = cls.create_test_server(wait_until='ACTIVE')
cls.existing_fixtures.append(srv)
resp, srv = cls.create_test_server()
@@ -127,19 +124,6 @@
self.assertRaises(exceptions.BadRequest, self.client.list_servers,
{'limit': -1})
- @test.attr(type='gate')
- def test_list_servers_by_changes_since(self):
- # Servers are listed by specifying changes-since date
- changes_since = {'changes-since': self.start_time.isoformat()}
- resp, body = self.client.list_servers(changes_since)
- self.assertEqual('200', resp['status'])
- # changes-since returns all instances, including deleted.
- num_expected = (len(self.existing_fixtures) +
- len(self.deleted_fixtures))
- self.assertEqual(num_expected, len(body['servers']),
- "Number of servers %d is wrong in %s" %
- (num_expected, body['servers']))
-
@test.attr(type=['negative', 'gate'])
def test_list_servers_by_changes_since_invalid_date(self):
# Return an error when invalid date format is passed
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 1f2bca9..d0fd876 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -42,6 +42,12 @@
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ _, server = self.client.get_server(self.server_id)
+ self.assertEqual(self.image_ref, server['image']['id'])
+ self.server_check_teardown()
+ super(ServerActionsTestJSON, self).tearDown()
+
@classmethod
def setUpClass(cls):
cls.prepare_instance_network()
@@ -126,7 +132,6 @@
metadata=meta,
personality=personality,
adminPass=password)
- self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -146,6 +151,8 @@
linux_client = remote_client.RemoteClient(server, self.ssh_user,
password)
linux_client.validate_authentication()
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, self.image_ref)
@test.attr(type='gate')
def test_rebuild_server_in_stop_state(self):
@@ -158,11 +165,7 @@
resp, server = self.client.stop(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
- self.addCleanup(self.client.start, self.server_id)
resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
- self.addCleanup(self.client.wait_for_server_status, self.server_id,
- 'SHUTOFF')
- self.addCleanup(self.client.rebuild, self.server_id, old_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -176,6 +179,12 @@
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
+ # Restore to the original image (The tearDown will test it again)
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, old_image)
+ self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+ self.client.start(self.server_id)
+
def _detect_server_image_flavor(self, server_id):
# Detects the current server image flavor ref.
resp, server = self.client.get_server(server_id)
@@ -184,26 +193,46 @@
if current_flavor == self.flavor_ref else self.flavor_ref
return current_flavor, new_flavor_ref
- @testtools.skipUnless(CONF.compute_feature_enabled.resize,
- 'Resize not available.')
- @test.attr(type='smoke')
- def test_resize_server_confirm(self):
+ def _test_resize_server_confirm(self, stop=False):
# The server's RAM and disk space should be modified to that of
# the provided flavor
previous_flavor_ref, new_flavor_ref = \
self._detect_server_image_flavor(self.server_id)
+ if stop:
+ resp = self.servers_client.stop(self.server_id)[0]
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(self.server_id,
+ 'SHUTOFF')
+
resp, server = self.client.resize(self.server_id, new_flavor_ref)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
self.client.confirm_resize(self.server_id)
- self.client.wait_for_server_status(self.server_id, 'ACTIVE')
+ expected_status = 'SHUTOFF' if stop else 'ACTIVE'
+ self.client.wait_for_server_status(self.server_id, expected_status)
resp, server = self.client.get_server(self.server_id)
self.assertEqual(new_flavor_ref, server['flavor']['id'])
+ if stop:
+ # NOTE(mriedem): tearDown requires the server to be started.
+ self.client.start(self.server_id)
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @test.attr(type='smoke')
+ def test_resize_server_confirm(self):
+ self._test_resize_server_confirm(stop=False)
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @test.attr(type='smoke')
+ def test_resize_server_confirm_from_stopped(self):
+ self._test_resize_server_confirm(stop=True)
+
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type='gate')
diff --git a/tempest/api/compute/servers/test_server_group.py b/tempest/api/compute/servers/test_server_group.py
new file mode 100644
index 0000000..0cd23fd
--- /dev/null
+++ b/tempest/api/compute/servers/test_server_group.py
@@ -0,0 +1,112 @@
+# Copyright 2014 NEC Technologies India Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.compute import base
+from tempest.common.utils import data_utils
+from tempest import test
+
+
+class ServerGroupTestJSON(base.BaseV2ComputeTest):
+ """
+    These tests check the server-group APIs.
+ They create/delete server-groups with different policies.
+ policies = affinity/anti-affinity
+    It also adds tests for listing and getting details of server-groups.
+ """
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(ServerGroupTestJSON, cls).setUpClass()
+ if not test.is_extension_enabled('os-server-groups', 'compute'):
+ msg = "os-server-groups extension is not enabled."
+ raise cls.skipException(msg)
+ cls.client = cls.servers_client
+ server_group_name = data_utils.rand_name('server-group')
+ cls.policy = ['affinity']
+
+ _, cls.created_server_group = cls.create_test_server_group(
+ server_group_name,
+ cls.policy)
+
+ def _create_server_group(self, name, policy):
+ # create the test server-group with given policy
+ server_group = {'name': name, 'policies': policy}
+ resp, body = self.create_test_server_group(name, policy)
+ self.assertEqual(200, resp.status)
+ for key in ['name', 'policies']:
+ self.assertEqual(server_group[key], body[key])
+ return body
+
+ def _delete_server_group(self, server_group):
+ # delete the test server-group
+ resp, _ = self.client.delete_server_group(server_group['id'])
+ self.assertEqual(204, resp.status)
+ # validation of server-group deletion
+ resp, server_group_list = self.client.list_server_groups()
+ self.assertEqual(200, resp.status)
+ self.assertNotIn(server_group, server_group_list)
+
+ def _create_delete_server_group(self, policy):
+ # Create and Delete the server-group with given policy
+ name = data_utils.rand_name('server-group')
+ server_group = self._create_server_group(name, policy)
+ self._delete_server_group(server_group)
+
+ @test.attr(type='gate')
+ def test_create_delete_server_group_with_affinity_policy(self):
+ # Create and Delete the server-group with affinity policy
+ self._create_delete_server_group(self.policy)
+
+ @test.attr(type='gate')
+ def test_create_delete_server_group_with_anti_affinity_policy(self):
+ # Create and Delete the server-group with anti-affinity policy
+ policy = ['anti-affinity']
+ self._create_delete_server_group(policy)
+
+ @test.attr(type='gate')
+ def test_create_delete_server_group_with_multiple_policies(self):
+ # Create and Delete the server-group with multiple policies
+ policies = ['affinity', 'affinity']
+ self._create_delete_server_group(policies)
+
+ @test.attr(type='gate')
+ def test_create_delete_multiple_server_groups_with_same_name_policy(self):
+ # Create and Delete the server-groups with same name and same policy
+ server_groups = []
+ server_group_name = data_utils.rand_name('server-group')
+ for i in range(0, 2):
+ server_groups.append(self._create_server_group(server_group_name,
+ self.policy))
+ for key in ['name', 'policies']:
+ self.assertEqual(server_groups[0][key], server_groups[1][key])
+ self.assertNotEqual(server_groups[0]['id'], server_groups[1]['id'])
+
+ for i in range(0, 2):
+ self._delete_server_group(server_groups[i])
+
+ @test.attr(type='gate')
+ def test_get_server_group(self):
+ # Get the server-group
+ resp, body = self.client.get_server_group(
+ self.created_server_group['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(self.created_server_group, body)
+
+ @test.attr(type='gate')
+ def test_list_server_groups(self):
+ # List the server-group
+ resp, body = self.client.list_server_groups()
+ self.assertEqual(200, resp.status)
+ self.assertIn(self.created_server_group, body)
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 8b69c78..b55833c 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -26,10 +26,7 @@
super(ServerMetadataNegativeTestJSON, cls).setUpClass()
cls.client = cls.servers_client
cls.quotas = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
cls.server_id = server['id']
diff --git a/tempest/api/compute/servers/test_server_personality.py b/tempest/api/compute/servers/test_server_personality.py
index ddfc1d5..b7e4e38 100644
--- a/tempest/api/compute/servers/test_server_personality.py
+++ b/tempest/api/compute/servers/test_server_personality.py
@@ -60,25 +60,6 @@
resp, server = self.create_test_server(personality=person)
self.assertEqual('202', resp['status'])
- @test.attr(type='gate')
- def test_create_server_with_existent_personality_file(self):
- # Any existing file that match specified file will be renamed to
- # include the bak extension appended with a time stamp
-
- # TODO(zhikunliu): will add validations when ssh instance validation
- # re-factor is ready
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
- resp, server = self.create_test_server(personality=personality,
- wait_until="ACTIVE")
- resp, image = self.create_image_from_server(server['id'],
- wait_until="ACTIVE")
- resp, server = self.create_test_server(image_id=image['id'],
- personality=personality,
- wait_until="ACTIVE")
- self.assertEqual('202', resp['status'])
-
class ServerPersonalityTestXML(ServerPersonalityTestJSON):
_interface = "xml"
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index 40b97d7..936b871 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -70,20 +70,34 @@
resp, server = self.client.get_server(server['id'])
self.assertEqual(key_name, server['key_name'])
+ def _update_server_name(self, server_id, status):
+        # The server name should be changed to the provided value
+ new_name = data_utils.rand_name('server')
+ # Update the server with a new name
+ resp, server = self.client.update_server(server_id,
+ name=new_name)
+ self.client.wait_for_server_status(server_id, status)
+
+ # Verify the name of the server has changed
+ resp, server = self.client.get_server(server_id)
+ self.assertEqual(new_name, server['name'])
+ return server
+
@test.attr(type='gate')
def test_update_server_name(self):
        # The server name should be changed to the provided value
resp, server = self.create_test_server(wait_until='ACTIVE')
- # Update the server with a new name
- resp, server = self.client.update_server(server['id'],
- name='newname')
- self.assertEqual(200, resp.status)
- self.client.wait_for_server_status(server['id'], 'ACTIVE')
+ self._update_server_name(server['id'], 'ACTIVE')
- # Verify the name of the server has changed
- resp, server = self.client.get_server(server['id'])
- self.assertEqual('newname', server['name'])
+ @test.attr(type='gate')
+ def test_update_server_name_in_stop_state(self):
+        # The server name should be changed to the provided value
+ resp, server = self.create_test_server(wait_until='ACTIVE')
+ self.client.stop(server['id'])
+ self.client.wait_for_server_status(server['id'], 'SHUTOFF')
+ updated_server = self._update_server_name(server['id'], 'SHUTOFF')
+ self.assertNotIn('progress', updated_server)
@test.attr(type='gate')
def test_update_access_server_address(self):
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index cc801b5..5ac667e 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -37,6 +37,10 @@
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ self.server_check_teardown()
+ super(ServersNegativeTestJSON, self).tearDown()
+
@classmethod
def setUpClass(cls):
super(ServersNegativeTestJSON, cls).setUpClass()
@@ -133,12 +137,11 @@
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
- self.addCleanup(self.client.unpause_server,
- self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
+ self.client.unpause_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
@@ -350,13 +353,12 @@
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
resp, _ = self.client.suspend_server(self.server_id)
- self.addCleanup(self.client.resume_server,
- self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(exceptions.Conflict,
self.client.suspend_server,
self.server_id)
+ self.client.resume_server(self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@@ -426,7 +428,6 @@
# shelve a shelved server.
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
- self.addCleanup(self.client.unshelve_server, self.server_id)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
@@ -448,6 +449,8 @@
self.client.shelve_server,
self.server_id)
+ self.client.unshelve_server(self.server_id)
+
@test.attr(type=['negative', 'gate'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index c87f24e..375ddf8 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -43,10 +43,7 @@
if CONF.compute.allow_tenant_isolation:
creds = cls.isolated_creds.get_alt_creds()
- username, tenant_name, password = creds
- cls.alt_manager = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.alt_manager = clients.Manager(credentials=creds)
else:
# Use the alt_XXX credentials in the config file
cls.alt_manager = clients.AltManager()
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 4db8c56..dc85e76 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -23,13 +23,8 @@
def setUpClass(cls):
super(QuotasTestJSON, cls).setUpClass()
cls.client = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
- resp, users = cls.admin_client.list_users_for_tenant(cls.tenant_id)
- cls.user_id = [user['id'] for user in users if user['name'] ==
- cls.client.user][0]
+ cls.tenant_id = cls.client.tenant_id
+ cls.user_id = cls.client.user_id
cls.default_quota_set = set(('injected_file_content_bytes',
'metadata_items', 'injected_files',
'ram', 'floating_ips',
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/api/compute/v2/__init__.py
similarity index 100%
rename from tempest/api/compute/v3/certificates/__init__.py
rename to tempest/api/compute/v2/__init__.py
diff --git a/tempest/api/compute/v3/admin/test_availability_zone.py b/tempest/api/compute/v3/admin/test_availability_zone.py
deleted file mode 100644
index 9ca8953..0000000
--- a/tempest/api/compute/v3/admin/test_availability_zone.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2013 NEC Corporation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest.test import attr
-
-
-class AZAdminV3Test(base.BaseV3ComputeAdminTest):
-
- """
- Tests Availability Zone API List
- """
-
- @classmethod
- def setUpClass(cls):
- super(AZAdminV3Test, cls).setUpClass()
- cls.client = cls.availability_zone_admin_client
-
- @attr(type='gate')
- def test_get_availability_zone_list(self):
- # List of availability zone
- resp, availability_zone = self.client.get_availability_zone_list()
- self.assertEqual(200, resp.status)
- self.assertTrue(len(availability_zone) > 0)
-
- @attr(type='gate')
- def test_get_availability_zone_list_detail(self):
- # List of availability zones and available services
- resp, availability_zone = \
- self.client.get_availability_zone_list_detail()
- self.assertEqual(200, resp.status)
- self.assertTrue(len(availability_zone) > 0)
diff --git a/tempest/api/compute/v3/admin/test_availability_zone_negative.py b/tempest/api/compute/v3/admin/test_availability_zone_negative.py
index f3af6df..b012e65 100644
--- a/tempest/api/compute/v3/admin/test_availability_zone_negative.py
+++ b/tempest/api/compute/v3/admin/test_availability_zone_negative.py
@@ -15,7 +15,7 @@
from tempest.api.compute import base
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class AZAdminNegativeV3Test(base.BaseV3ComputeAdminTest):
@@ -30,7 +30,7 @@
cls.client = cls.availability_zone_admin_client
cls.non_adm_client = cls.availability_zone_client
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_availability_zone_list_detail_with_non_admin_user(self):
# List of availability zones and available services with
# non-administrator user
diff --git a/tempest/api/compute/v3/admin/test_flavors_access.py b/tempest/api/compute/v3/admin/test_flavors_access.py
index 03305ff..c641bf6 100644
--- a/tempest/api/compute/v3/admin/test_flavors_access.py
+++ b/tempest/api/compute/v3/admin/test_flavors_access.py
@@ -31,12 +31,8 @@
cls.client = cls.flavors_admin_client
admin_client = cls._get_identity_admin_client()
- cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
- tenant_name)
- cls.tenant_id = cls.tenant['id']
- cls.adm_tenant = admin_client.get_tenant_by_name(
- cls.flavors_admin_client.tenant_name)
- cls.adm_tenant_id = cls.adm_tenant['id']
+ cls.tenant_id = cls.client.tenant_id
+ cls.adm_tenant_id = admin_client.tenant_id
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
diff --git a/tempest/api/compute/v3/admin/test_flavors_access_negative.py b/tempest/api/compute/v3/admin/test_flavors_access_negative.py
index 334d124..02ecb24 100644
--- a/tempest/api/compute/v3/admin/test_flavors_access_negative.py
+++ b/tempest/api/compute/v3/admin/test_flavors_access_negative.py
@@ -33,13 +33,7 @@
super(FlavorsAccessNegativeV3Test, cls).setUpClass()
cls.client = cls.flavors_admin_client
- admin_client = cls._get_identity_admin_client()
- cls.tenant = admin_client.get_tenant_by_name(cls.flavors_client.
- tenant_name)
- cls.tenant_id = cls.tenant['id']
- cls.adm_tenant = admin_client.get_tenant_by_name(
- cls.flavors_admin_client.tenant_name)
- cls.adm_tenant_id = cls.adm_tenant['id']
+ cls.tenant_id = cls.client.tenant_id
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
diff --git a/tempest/api/compute/v3/admin/test_hypervisor.py b/tempest/api/compute/v3/admin/test_hypervisor.py
index 93d4441..f3397a8 100644
--- a/tempest/api/compute/v3/admin/test_hypervisor.py
+++ b/tempest/api/compute/v3/admin/test_hypervisor.py
@@ -14,7 +14,7 @@
# under the License.
from tempest.api.compute import base
-from tempest.test import attr
+from tempest import test
class HypervisorAdminV3Test(base.BaseV3ComputeAdminTest):
@@ -34,20 +34,20 @@
self.assertEqual(200, resp.status)
return hypers
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_hypervisor_list(self):
# List of hypervisor and available hypervisors hostname
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_hypervisor_list_details(self):
# Display the details of all hypervisors
resp, hypers = self.client.get_hypervisor_list_details()
self.assertEqual(200, resp.status)
self.assertTrue(len(hypers) > 0)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_hypervisor_show_details(self):
# Display the details of the specified hypervisor
hypers = self._list_hypervisors()
@@ -60,7 +60,7 @@
self.assertEqual(details['hypervisor_hostname'],
hypers[0]['hypervisor_hostname'])
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_hypervisor_show_servers(self):
# Show instances about the specific hypervisors
hypers = self._list_hypervisors()
@@ -71,14 +71,14 @@
self.assertEqual(200, resp.status)
self.assertTrue(len(hypervisors) > 0)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_hypervisor_stats(self):
# Verify the stats of all hypervisors
resp, stats = self.client.get_hypervisor_stats()
self.assertEqual(200, resp.status)
self.assertTrue(len(stats) > 0)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_hypervisor_uptime(self):
# Verify that GET shows the specified hypervisor uptime
hypers = self._list_hypervisors()
@@ -87,7 +87,7 @@
self.assertEqual(200, resp.status)
self.assertTrue(len(uptime) > 0)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_search_hypervisor(self):
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
diff --git a/tempest/api/compute/v3/admin/test_hypervisor_negative.py b/tempest/api/compute/v3/admin/test_hypervisor_negative.py
index 45642b7..ae4df15 100644
--- a/tempest/api/compute/v3/admin/test_hypervisor_negative.py
+++ b/tempest/api/compute/v3/admin/test_hypervisor_negative.py
@@ -18,7 +18,7 @@
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class HypervisorAdminNegativeV3Test(base.BaseV3ComputeAdminTest):
@@ -39,7 +39,7 @@
self.assertEqual(200, resp.status)
return hypers
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_show_nonexistent_hypervisor(self):
nonexistent_hypervisor_id = str(uuid.uuid4())
@@ -48,7 +48,7 @@
self.client.get_hypervisor_show_details,
nonexistent_hypervisor_id)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_show_hypervisor_with_non_admin_user(self):
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
@@ -58,7 +58,7 @@
self.non_adm_client.get_hypervisor_show_details,
hypers[0]['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_show_servers_with_non_admin_user(self):
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
@@ -68,7 +68,7 @@
self.non_adm_client.get_hypervisor_servers,
hypers[0]['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_show_servers_with_nonexistent_hypervisor(self):
nonexistent_hypervisor_id = str(uuid.uuid4())
@@ -77,13 +77,13 @@
self.client.get_hypervisor_servers,
nonexistent_hypervisor_id)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_hypervisor_stats_with_non_admin_user(self):
self.assertRaises(
exceptions.Unauthorized,
self.non_adm_client.get_hypervisor_stats)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_nonexistent_hypervisor_uptime(self):
nonexistent_hypervisor_id = str(uuid.uuid4())
@@ -92,7 +92,7 @@
self.client.get_hypervisor_uptime,
nonexistent_hypervisor_id)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_hypervisor_uptime_with_non_admin_user(self):
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
@@ -102,21 +102,21 @@
self.non_adm_client.get_hypervisor_uptime,
hypers[0]['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_hypervisor_list_with_non_admin_user(self):
# List of hypervisor and available services with non admin user
self.assertRaises(
exceptions.Unauthorized,
self.non_adm_client.get_hypervisor_list)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_hypervisor_list_details_with_non_admin_user(self):
# List of hypervisor details and available services with non admin user
self.assertRaises(
exceptions.Unauthorized,
self.non_adm_client.get_hypervisor_list_details)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_search_nonexistent_hypervisor(self):
nonexistent_hypervisor_name = data_utils.rand_name('test_hypervisor')
@@ -125,7 +125,7 @@
self.assertEqual(200, resp.status)
self.assertEqual(0, len(hypers))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_search_hypervisor_with_non_admin_user(self):
hypers = self._list_hypervisors()
self.assertTrue(len(hypers) > 0)
diff --git a/tempest/api/compute/v3/admin/test_quotas.py b/tempest/api/compute/v3/admin/test_quotas.py
index b70e254..19c31fe 100644
--- a/tempest/api/compute/v3/admin/test_quotas.py
+++ b/tempest/api/compute/v3/admin/test_quotas.py
@@ -32,8 +32,7 @@
# NOTE(afazekas): these test cases should always create and use a new
# tenant; most of them should be skipped if we can't do that
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.client.tenant_id
cls.default_quota_set = set(('metadata_items',
'ram', 'floating_ips',
diff --git a/tempest/api/compute/v3/admin/test_quotas_negative.py b/tempest/api/compute/v3/admin/test_quotas_negative.py
index d138e80..307462f 100644
--- a/tempest/api/compute/v3/admin/test_quotas_negative.py
+++ b/tempest/api/compute/v3/admin/test_quotas_negative.py
@@ -30,8 +30,7 @@
# NOTE(afazekas): these test cases should always create and use a new
# tenant; most of them should be skipped if we can't do that
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
# TODO(afazekas): Add a dedicated tenant to the skipped quota tests
# it can be moved into the setUpClass as well
diff --git a/tempest/api/compute/v3/admin/test_servers_negative.py b/tempest/api/compute/v3/admin/test_servers_negative.py
index fba4cd1..a971463 100644
--- a/tempest/api/compute/v3/admin/test_servers_negative.py
+++ b/tempest/api/compute/v3/admin/test_servers_negative.py
@@ -20,7 +20,7 @@
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
CONF = config.CONF
@@ -37,10 +37,7 @@
cls.client = cls.servers_admin_client
cls.non_adm_client = cls.servers_client
cls.flavors_client = cls.flavors_admin_client
- cls.identity_client = cls._get_identity_admin_client()
- tenant = cls.identity_client.get_tenant_by_name(
- cls.client.tenant_name)
- cls.tenant_id = tenant['id']
+ cls.tenant_id = cls.client.tenant_id
cls.s1_name = data_utils.rand_name('server')
resp, server = cls.create_test_server(name=cls.s1_name,
@@ -57,7 +54,7 @@
flavor_id = data_utils.rand_int_id(start=1000)
return flavor_id
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_ram(self):
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
@@ -75,7 +72,7 @@
self.servers[0]['id'],
flavor_ref['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_resize_server_using_overlimit_vcpus(self):
flavor_name = data_utils.rand_name("flavor-")
flavor_id = self._get_unused_flavor_id()
@@ -93,31 +90,31 @@
self.servers[0]['id'],
flavor_ref['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_reset_state_server_invalid_state(self):
self.assertRaises(exceptions.BadRequest,
self.client.reset_state, self.s1_id,
state='invalid')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_reset_state_server_invalid_type(self):
self.assertRaises(exceptions.BadRequest,
self.client.reset_state, self.s1_id,
state=1)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_reset_state_server_nonexistent_server(self):
self.assertRaises(exceptions.NotFound,
self.client.reset_state, '999')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_server_diagnostics_by_non_admin(self):
# Non-admin user can not view server diagnostics according to policy
self.assertRaises(exceptions.Unauthorized,
self.non_adm_client.get_server_diagnostics,
self.s1_id)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_migrate_non_existent_server(self):
# migrate a non existent server
self.assertRaises(exceptions.NotFound,
@@ -126,7 +123,7 @@
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_migrate_server_invalid_state(self):
# create server.
resp, server = self.create_test_server(wait_until='ACTIVE')
diff --git a/tempest/api/compute/v3/admin/test_services.py b/tempest/api/compute/v3/admin/test_services.py
index b367dad..e6efb70 100644
--- a/tempest/api/compute/v3/admin/test_services.py
+++ b/tempest/api/compute/v3/admin/test_services.py
@@ -15,7 +15,7 @@
# under the License.
from tempest.api.compute import base
-from tempest.test import attr
+from tempest import test
class ServicesAdminV3Test(base.BaseV3ComputeAdminTest):
@@ -29,13 +29,13 @@
super(ServicesAdminV3Test, cls).setUpClass()
cls.client = cls.services_admin_client
- @attr(type='gate')
+ @test.attr(type='gate')
def test_list_services(self):
resp, services = self.client.list_services()
self.assertEqual(200, resp.status)
self.assertNotEqual(0, len(services))
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_service_by_service_binary_name(self):
binary_name = 'nova-compute'
params = {'binary': binary_name}
@@ -45,7 +45,7 @@
for service in services:
self.assertEqual(binary_name, service['binary'])
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_service_by_host_name(self):
resp, services = self.client.list_services()
self.assertEqual(200, resp.status)
@@ -65,7 +65,7 @@
# on order.
self.assertEqual(sorted(s1), sorted(s2))
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_service_by_service_and_host_name(self):
resp, services = self.client.list_services()
host_name = services[0]['host']
diff --git a/tempest/api/compute/v3/admin/test_services_negative.py b/tempest/api/compute/v3/admin/test_services_negative.py
index 3168af2..6ac78d4 100644
--- a/tempest/api/compute/v3/admin/test_services_negative.py
+++ b/tempest/api/compute/v3/admin/test_services_negative.py
@@ -16,7 +16,7 @@
from tempest.api.compute import base
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class ServicesAdminNegativeV3Test(base.BaseV3ComputeAdminTest):
@@ -31,12 +31,12 @@
cls.client = cls.services_admin_client
cls.non_admin_client = cls.services_client
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_services_with_non_admin_user(self):
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_services)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_service_by_invalid_params(self):
# return all services if the request is sent with an invalid parameter
resp, services = self.client.list_services()
@@ -45,7 +45,7 @@
self.assertEqual(200, resp.status)
self.assertEqual(len(services), len(services_xxx))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_service_by_invalid_service_and_valid_host(self):
resp, services = self.client.list_services()
host_name = services[0]['host']
@@ -54,7 +54,7 @@
self.assertEqual(200, resp.status)
self.assertEqual(0, len(services))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_service_with_valid_service_and_invalid_host(self):
resp, services = self.client.list_services()
binary_name = services[0]['binary']
diff --git a/tempest/api/compute/v3/certificates/test_certificates.py b/tempest/api/compute/v3/certificates/test_certificates.py
deleted file mode 100644
index 0ba44cb..0000000
--- a/tempest/api/compute/v3/certificates/test_certificates.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest.test import attr
-
-
-class CertificatesV3Test(base.BaseV3ComputeTest):
-
- @attr(type='gate')
- def test_create_root_certificate(self):
- # create certificates
- resp, body = self.certificates_client.create_certificate()
- self.assertEqual(201, resp.status)
- self.assertIn('data', body)
- self.assertIn('private_key', body)
-
- @attr(type='gate')
- def test_get_root_certificate(self):
- # get the root certificate
- resp, body = self.certificates_client.get_certificate('root')
- self.assertEqual(200, resp.status)
- self.assertIn('data', body)
- self.assertIn('private_key', body)
diff --git a/tempest/api/compute/v3/flavors/test_flavors.py b/tempest/api/compute/v3/flavors/test_flavors.py
deleted file mode 100644
index a0bbba6..0000000
--- a/tempest/api/compute/v3/flavors/test_flavors.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest import test
-
-
-class FlavorsV3Test(base.BaseV3ComputeTest):
-
- @classmethod
- def setUpClass(cls):
- super(FlavorsV3Test, cls).setUpClass()
- cls.client = cls.flavors_client
-
- @test.attr(type='smoke')
- def test_list_flavors(self):
- # List of all flavors should contain the expected flavor
- resp, flavors = self.client.list_flavors()
- resp, flavor = self.client.get_flavor_details(self.flavor_ref)
- flavor_min_detail = {'id': flavor['id'], 'links': flavor['links'],
- 'name': flavor['name']}
- self.assertIn(flavor_min_detail, flavors)
-
- @test.attr(type='smoke')
- def test_list_flavors_with_detail(self):
- # Detailed list of all flavors should contain the expected flavor
- resp, flavors = self.client.list_flavors_with_detail()
- resp, flavor = self.client.get_flavor_details(self.flavor_ref)
- self.assertIn(flavor, flavors)
-
- @test.attr(type='smoke')
- def test_get_flavor(self):
- # The expected flavor details should be returned
- resp, flavor = self.client.get_flavor_details(self.flavor_ref)
- self.assertEqual(self.flavor_ref, flavor['id'])
-
- @test.attr(type='gate')
- def test_list_flavors_limit_results(self):
- # Only the expected number of flavors should be returned
- params = {'limit': 1}
- resp, flavors = self.client.list_flavors(params)
- self.assertEqual(1, len(flavors))
-
- @test.attr(type='gate')
- def test_list_flavors_detailed_limit_results(self):
- # Only the expected number of flavors (detailed) should be returned
- params = {'limit': 1}
- resp, flavors = self.client.list_flavors_with_detail(params)
- self.assertEqual(1, len(flavors))
-
- @test.attr(type='gate')
- def test_list_flavors_using_marker(self):
- # The list of flavors should start from the provided marker
- resp, flavors = self.client.list_flavors()
- flavor_id = flavors[0]['id']
-
- params = {'marker': flavor_id}
- resp, flavors = self.client.list_flavors(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]),
- 'The list of flavors did not start after the marker.')
-
- @test.attr(type='gate')
- def test_list_flavors_detailed_using_marker(self):
- # The list of flavors should start from the provided marker
- resp, flavors = self.client.list_flavors_with_detail()
- flavor_id = flavors[0]['id']
-
- params = {'marker': flavor_id}
- resp, flavors = self.client.list_flavors_with_detail(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]),
- 'The list of flavors did not start after the marker.')
-
- @test.attr(type='gate')
- def test_list_flavors_detailed_filter_by_min_disk(self):
- # The detailed list of flavors should be filtered by disk space
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['disk'])
- flavor_id = flavors[0]['id']
-
- params = {'min_disk': flavors[0]['disk'] + 1}
- resp, flavors = self.client.list_flavors_with_detail(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-
- @test.attr(type='gate')
- def test_list_flavors_detailed_filter_by_min_ram(self):
- # The detailed list of flavors should be filtered by RAM
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['ram'])
- flavor_id = flavors[0]['id']
-
- params = {'min_ram': flavors[0]['ram'] + 1}
- resp, flavors = self.client.list_flavors_with_detail(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-
- @test.attr(type='gate')
- def test_list_flavors_filter_by_min_disk(self):
- # The list of flavors should be filtered by disk space
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['disk'])
- flavor_id = flavors[0]['id']
-
- params = {'min_disk': flavors[0]['disk'] + 1}
- resp, flavors = self.client.list_flavors(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
-
- @test.attr(type='gate')
- def test_list_flavors_filter_by_min_ram(self):
- # The list of flavors should be filtered by RAM
- resp, flavors = self.client.list_flavors_with_detail()
- flavors = sorted(flavors, key=lambda k: k['ram'])
- flavor_id = flavors[0]['id']
-
- params = {'min_ram': flavors[0]['ram'] + 1}
- resp, flavors = self.client.list_flavors(params)
- self.assertFalse(any([i for i in flavors if i['id'] == flavor_id]))
diff --git a/tempest/api/compute/v3/images/test_images_oneserver.py b/tempest/api/compute/v3/images/test_images_oneserver.py
index 3aab1e1..795437b 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver.py
@@ -41,6 +41,11 @@
# Usually it means the server had a serious accident
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ """Terminate test instances created after a test is executed."""
+ self.server_check_teardown()
+ super(ImagesOneServerV3Test, self).tearDown()
+
@classmethod
def setUpClass(cls):
super(ImagesOneServerV3Test, cls).setUpClass()
diff --git a/tempest/api/compute/v3/images/test_images_oneserver_negative.py b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
index 7679eee..eed81c6 100644
--- a/tempest/api/compute/v3/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/v3/images/test_images_oneserver_negative.py
@@ -33,6 +33,7 @@
for image_id in self.image_ids:
self.client.delete_image(image_id)
self.image_ids.remove(image_id)
+ self.server_check_teardown()
super(ImagesOneServerNegativeV3Test, self).tearDown()
def setUp(self):
diff --git a/tempest/api/compute/v3/keypairs/test_keypairs.py b/tempest/api/compute/v3/keypairs/test_keypairs.py
deleted file mode 100644
index 668a295..0000000
--- a/tempest/api/compute/v3/keypairs/test_keypairs.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-class KeyPairsV3Test(base.BaseV3ComputeTest):
-
- @classmethod
- def setUpClass(cls):
- super(KeyPairsV3Test, cls).setUpClass()
- cls.client = cls.keypairs_client
-
- def _delete_keypair(self, keypair_name):
- resp, _ = self.client.delete_keypair(keypair_name)
- self.assertEqual(204, resp.status)
-
- def _create_keypair(self, keypair_name, pub_key=None):
- resp, body = self.client.create_keypair(keypair_name, pub_key)
- self.addCleanup(self._delete_keypair, keypair_name)
- return resp, body
-
- @test.attr(type='gate')
- def test_keypairs_create_list_delete(self):
- # Keypairs created should be available in the response list
- # Create 3 keypairs
- key_list = list()
- for i in range(3):
- k_name = data_utils.rand_name('keypair-')
- resp, keypair = self._create_keypair(k_name)
- # Need to pop these keys so that our compare doesn't fail later,
- # as the keypair dicts from list API doesn't have them.
- keypair.pop('private_key')
- keypair.pop('user_id')
- self.assertEqual(201, resp.status)
- key_list.append(keypair)
- # Fetch all keypairs and verify the list
- # has all created keypairs
- resp, fetched_list = self.client.list_keypairs()
- self.assertEqual(200, resp.status)
- # We need to remove the extra 'keypair' element in the
- # returned dict. See comment in keypairs_client.list_keypairs()
- new_list = list()
- for keypair in fetched_list:
- new_list.append(keypair['keypair'])
- fetched_list = new_list
- # Now check if all the created keypairs are in the fetched list
- missing_kps = [kp for kp in key_list if kp not in fetched_list]
- self.assertFalse(missing_kps,
- "Failed to find keypairs %s in fetched list"
- % ', '.join(m_key['name'] for m_key in missing_kps))
-
- @test.attr(type='gate')
- def test_keypair_create_delete(self):
- # Keypair should be created, verified and deleted
- k_name = data_utils.rand_name('keypair-')
- resp, keypair = self._create_keypair(k_name)
- self.assertEqual(201, resp.status)
- private_key = keypair['private_key']
- key_name = keypair['name']
- self.assertEqual(key_name, k_name,
- "The created keypair name is not equal "
- "to the requested name")
- self.assertTrue(private_key is not None,
- "Field private_key is empty or not found.")
-
- @test.attr(type='gate')
- def test_get_keypair_detail(self):
- # Keypair should be created, Got details by name and deleted
- k_name = data_utils.rand_name('keypair-')
- resp, keypair = self._create_keypair(k_name)
- resp, keypair_detail = self.client.get_keypair(k_name)
- self.assertEqual(200, resp.status)
- self.assertIn('name', keypair_detail)
- self.assertIn('public_key', keypair_detail)
- self.assertEqual(keypair_detail['name'], k_name,
- "The created keypair name is not equal "
- "to requested name")
- public_key = keypair_detail['public_key']
- self.assertTrue(public_key is not None,
- "Field public_key is empty or not found.")
-
- @test.attr(type='gate')
- def test_keypair_create_with_pub_key(self):
- # Keypair should be created with a given public key
- k_name = data_utils.rand_name('keypair-')
- pub_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs"
- "Ne3/1ILNCqFyfYWDeTKLD6jEXC2OQHLmietMWW+/vd"
- "aZq7KZEwO0jhglaFjU1mpqq4Gz5RX156sCTNM9vRbw"
- "KAxfsdF9laBYVsex3m3Wmui3uYrKyumsoJn2g9GNnG1P"
- "I1mrVjZ61i0GY3khna+wzlTpCCmy5HNlrmbj3XLqBUpip"
- "TOXmsnr4sChzC53KCd8LXuwc1i/CZPvF+3XipvAgFSE53pCt"
- "LOeB1kYMOBaiUPLQTWXR3JpckqFIQwhIH0zoHlJvZE8hh90"
- "XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
- "snSA8wzBx3A/8y9Pp1B nova@ubuntu")
- resp, keypair = self._create_keypair(k_name, pub_key)
- self.assertEqual(201, resp.status)
- self.assertFalse('private_key' in keypair,
- "Field private_key is not empty!")
- key_name = keypair['name']
- self.assertEqual(key_name, k_name,
- "The created keypair name is not equal "
- "to the requested name!")
diff --git a/tempest/api/compute/v3/servers/test_attach_interfaces.py b/tempest/api/compute/v3/servers/test_attach_interfaces.py
index c848f8c..43440c1 100644
--- a/tempest/api/compute/v3/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/v3/servers/test_attach_interfaces.py
@@ -16,7 +16,7 @@
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
import time
@@ -106,7 +106,7 @@
self.assertEqual(sorted(list1), sorted(list2))
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_create_list_show_delete_interfaces(self):
server, ifs = self._create_server_get_interfaces()
interface_count = len(ifs)
@@ -127,7 +127,7 @@
_ifs = self._test_delete_interface(server, ifs)
self.assertEqual(len(ifs) - 1, len(_ifs))
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_add_remove_fixed_ip(self):
# Add and Remove the fixed IP to server.
server, ifs = self._create_server_get_interfaces()
diff --git a/tempest/api/compute/v3/servers/test_create_server.py b/tempest/api/compute/v3/servers/test_create_server.py
index 68b4b9d..c59fe91 100644
--- a/tempest/api/compute/v3/servers/test_create_server.py
+++ b/tempest/api/compute/v3/servers/test_create_server.py
@@ -54,13 +54,6 @@
resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@test.attr(type='smoke')
- def test_create_server_response(self):
- # Check that the required fields are returned with values
- self.assertEqual(202, self.resp.status)
- self.assertTrue(self.server_initial['id'] is not None)
- self.assertTrue(self.server_initial['admin_password'] is not None)
-
- @test.attr(type='smoke')
def test_verify_server_details(self):
# Verify the specified server attributes are set correctly
self.assertEqual(self.accessIPv4,
@@ -118,25 +111,8 @@
def setUpClass(cls):
cls.prepare_instance_network()
super(ServersWithSpecificFlavorV3Test, cls).setUpClass()
- cls.meta = {'hello': 'world'}
- cls.accessIPv4 = '1.1.1.1'
- cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
- cls.name = data_utils.rand_name('server')
- file_contents = 'This is a test file.'
- personality = [{'path': '/test.txt',
- 'contents': base64.b64encode(file_contents)}]
cls.client = cls.servers_client
cls.flavor_client = cls.flavors_admin_client
- cli_resp = cls.create_test_server(name=cls.name,
- meta=cls.meta,
- access_ip_v4=cls.accessIPv4,
- access_ip_v6=cls.accessIPv6,
- personality=personality,
- disk_config=cls.disk_config)
- cls.resp, cls.server_initial = cli_resp
- cls.password = cls.server_initial['admin_password']
- cls.client.wait_for_server_status(cls.server_initial['id'], 'ACTIVE')
- resp, cls.server = cls.client.get_server(cls.server_initial['id'])
@testtools.skipUnless(CONF.compute.run_ssh,
'Instance validation tests are disabled.')
@@ -144,7 +120,7 @@
def test_verify_created_server_ephemeral_disk(self):
# Verify that the ephemeral disk is created when creating server
- def create_flavor_with_extra_specs(self):
+ def create_flavor_with_extra_specs():
flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)
ram = 512
@@ -156,13 +132,13 @@
create_flavor(flavor_with_eph_disk_name,
ram, vcpus, disk,
flavor_with_eph_disk_id,
- ephemeral=1, swap=1024, rxtx=1))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
- self.assertEqual(200, resp.status)
+ ephemeral=1, rxtx=1))
+ self.addCleanup(flavor_clean_up, flavor['id'])
+ self.assertEqual(201, resp.status)
return flavor['id']
- def create_flavor_without_extra_specs(self):
+ def create_flavor_without_extra_specs():
flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)
@@ -175,18 +151,18 @@
create_flavor(flavor_no_eph_disk_name,
ram, vcpus, disk,
flavor_no_eph_disk_id))
- self.addCleanup(self.flavor_clean_up, flavor['id'])
- self.assertEqual(200, resp.status)
+ self.addCleanup(flavor_clean_up, flavor['id'])
+ self.assertEqual(201, resp.status)
return flavor['id']
- def flavor_clean_up(self, flavor_id):
+ def flavor_clean_up(flavor_id):
resp, body = self.flavor_client.delete_flavor(flavor_id)
- self.assertEqual(resp.status, 202)
+ self.assertEqual(resp.status, 204)
self.flavor_client.wait_for_resource_deletion(flavor_id)
- flavor_with_eph_disk_id = self.create_flavor_with_extra_specs()
- flavor_no_eph_disk_id = self.create_flavor_without_extra_specs()
+ flavor_with_eph_disk_id = create_flavor_with_extra_specs()
+ flavor_no_eph_disk_id = create_flavor_without_extra_specs()
admin_pass = self.image_ssh_password
@@ -199,13 +175,17 @@
adminPass=admin_pass,
flavor=flavor_with_eph_disk_id))
# Get partition number of server without extra specs.
+ _, server_no_eph_disk = self.client.get_server(
+ server_no_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_no_eph_disk,
- self.ssh_user, self.password)
- partition_num = len(linux_client.get_partitions())
-
+ self.ssh_user, admin_pass)
+ partition_num = len(linux_client.get_partitions().split('\n'))
+ _, server_with_eph_disk = self.client.get_server(
+ server_with_eph_disk['id'])
linux_client = remote_client.RemoteClient(server_with_eph_disk,
- self.ssh_user, self.password)
- self.assertEqual(partition_num + 1, linux_client.get_partitions())
+ self.ssh_user, admin_pass)
+ partition_num_eph = len(linux_client.get_partitions().split('\n'))
+ self.assertEqual(partition_num + 1, partition_num_eph)
class ServersV3TestManualDisk(ServersV3Test):
diff --git a/tempest/api/compute/v3/servers/test_list_servers_negative.py b/tempest/api/compute/v3/servers/test_list_servers_negative.py
index 9cbc4e0..18e5c67 100644
--- a/tempest/api/compute/v3/servers/test_list_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_list_servers_negative.py
@@ -39,7 +39,7 @@
cls.deleted_fixtures = []
cls.start_time = datetime.datetime.utcnow()
for x in moves.xrange(2):
- resp, srv = cls.create_test_server()
+ resp, srv = cls.create_test_server(wait_until='ACTIVE')
cls.existing_fixtures.append(srv)
resp, srv = cls.create_test_server()
diff --git a/tempest/api/compute/v3/servers/test_server_actions.py b/tempest/api/compute/v3/servers/test_server_actions.py
index 1495cb7..e098311 100644
--- a/tempest/api/compute/v3/servers/test_server_actions.py
+++ b/tempest/api/compute/v3/servers/test_server_actions.py
@@ -14,6 +14,7 @@
# under the License.
import testtools
+import urlparse
from tempest.api.compute import base
from tempest.common.utils import data_utils
@@ -39,6 +40,12 @@
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ _, server = self.client.get_server(self.server_id)
+ self.assertEqual(self.image_ref, server['image']['id'])
+ self.server_check_teardown()
+ super(ServerActionsV3Test, self).tearDown()
+
@classmethod
def setUpClass(cls):
cls.prepare_instance_network()
@@ -117,7 +124,6 @@
name=new_name,
metadata=meta,
admin_password=password)
- self.addCleanup(self.client.rebuild, self.server_id, self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -138,6 +144,9 @@
password)
linux_client.validate_authentication()
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, self.image_ref)
+
@test.attr(type='gate')
def test_rebuild_server_in_stop_state(self):
# The server in stop state should be rebuilt using the provided
@@ -149,11 +158,7 @@
resp, server = self.client.stop(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
- self.addCleanup(self.client.start, self.server_id)
resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
- self.addCleanup(self.client.wait_for_server_status, self.server_id,
- 'SHUTOFF')
- self.addCleanup(self.client.rebuild, self.server_id, old_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
@@ -167,6 +172,12 @@
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
+ # Restore the original image (the tearDown will verify it again)
+ if self.image_ref_alt != self.image_ref:
+ self.client.rebuild(self.server_id, old_image)
+ self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
+ self.client.start(self.server_id)
+
def _detect_server_image_flavor(self, server_id):
# Detects the current server image flavor ref.
resp, server = self.client.get_server(server_id)
@@ -175,26 +186,46 @@
if current_flavor == self.flavor_ref else self.flavor_ref
return current_flavor, new_flavor_ref
- @testtools.skipUnless(CONF.compute_feature_enabled.resize,
- 'Resize not available.')
- @test.attr(type='smoke')
- def test_resize_server_confirm(self):
+ def _test_resize_server_confirm(self, stop=False):
# The server's RAM and disk space should be modified to that of
# the provided flavor
previous_flavor_ref, new_flavor_ref = \
self._detect_server_image_flavor(self.server_id)
+ if stop:
+ resp = self.servers_client.stop(self.server_id)[0]
+ self.assertEqual(202, resp.status)
+ self.servers_client.wait_for_server_status(self.server_id,
+ 'SHUTOFF')
+
resp, server = self.client.resize(self.server_id, new_flavor_ref)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
self.client.confirm_resize(self.server_id)
- self.client.wait_for_server_status(self.server_id, 'ACTIVE')
+ expected_status = 'SHUTOFF' if stop else 'ACTIVE'
+ self.client.wait_for_server_status(self.server_id, expected_status)
resp, server = self.client.get_server(self.server_id)
self.assertEqual(new_flavor_ref, server['flavor']['id'])
+ if stop:
+ # NOTE(mriedem): tearDown requires the server to be started.
+ self.client.start(self.server_id)
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @test.attr(type='smoke')
+ def test_resize_server_confirm(self):
+ self._test_resize_server_confirm(stop=False)
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ @test.attr(type='smoke')
+ def test_resize_server_confirm_from_stopped(self):
+ self._test_resize_server_confirm(stop=True)
+
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type='gate')
@@ -410,6 +441,12 @@
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
+ def _validate_url(self, url):
+ valid_scheme = ['http', 'https']
+ parsed_url = urlparse.urlparse(url)
+ self.assertIsNotNone(parsed_url.hostname)
+ self.assertIn(parsed_url.scheme, valid_scheme)
+
@testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
'VNC Console feature is disabled')
@test.attr(type='gate')
@@ -419,6 +456,35 @@
for console_type in console_types:
resp, body = self.servers_client.get_vnc_console(self.server_id,
console_type)
- self.assertEqual(200, resp.status)
+ self.assertEqual(
+ 200, resp.status,
+ "Failed to get Console Type: %s" % (console_type))
self.assertEqual(console_type, body['type'])
self.assertNotEqual('', body['url'])
+ self._validate_url(body['url'])
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.spice_console,
+ 'Spice Console feature is disabled.')
+ @test.attr(type='gate')
+ def test_get_spice_console(self):
+ # Get the Spice console of type "spice-html5"
+ console_type = 'spice-html5'
+ resp, body = self.servers_client.get_spice_console(self.server_id,
+ console_type)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(console_type, body['type'])
+ self.assertNotEqual('', body['url'])
+ self._validate_url(body['url'])
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.rdp_console,
+ 'RDP Console feature is disabled.')
+ @test.attr(type='gate')
+ def test_get_rdp_console(self):
+ # Get the RDP console of type "rdp-html5"
+ console_type = 'rdp-html5'
+ resp, body = self.servers_client.get_rdp_console(self.server_id,
+ console_type)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(console_type, body['type'])
+ self.assertNotEqual('', body['url'])
+ self._validate_url(body['url'])
diff --git a/tempest/api/compute/v3/servers/test_server_metadata.py b/tempest/api/compute/v3/servers/test_server_metadata.py
index 0e4ef07..298cd3c 100644
--- a/tempest/api/compute/v3/servers/test_server_metadata.py
+++ b/tempest/api/compute/v3/servers/test_server_metadata.py
@@ -24,10 +24,7 @@
super(ServerMetadataV3Test, cls).setUpClass()
cls.client = cls.servers_client
cls.quotas = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
cls.server_id = server['id']
diff --git a/tempest/api/compute/v3/servers/test_server_metadata_negative.py b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
index ec2bc8c..f746be3 100644
--- a/tempest/api/compute/v3/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/v3/servers/test_server_metadata_negative.py
@@ -25,10 +25,7 @@
super(ServerMetadataV3NegativeTest, cls).setUpClass()
cls.client = cls.servers_client
cls.quotas = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
+ cls.tenant_id = cls.client.tenant_id
resp, server = cls.create_test_server(meta={}, wait_until='ACTIVE')
cls.server_id = server['id']
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index c1d1935..827c4c4 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -37,6 +37,10 @@
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
+ def tearDown(self):
+ self.server_check_teardown()
+ super(ServersNegativeV3Test, self).tearDown()
+
@classmethod
def setUpClass(cls):
super(ServersNegativeV3Test, cls).setUpClass()
@@ -121,12 +125,11 @@
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
- self.addCleanup(self.client.unpause_server,
- self.server_id)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.client.pause_server,
self.server_id)
+ self.client.unpause_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_rebuild_reboot_deleted_server(self):
@@ -330,13 +333,12 @@
def test_suspend_server_invalid_state(self):
# suspend a suspended server.
resp, _ = self.client.suspend_server(self.server_id)
- self.addCleanup(self.client.resume_server,
- self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
self.assertRaises(exceptions.Conflict,
self.client.suspend_server,
self.server_id)
+ self.client.resume_server(self.server_id)
@test.attr(type=['negative', 'gate'])
def test_resume_non_existent_server(self):
@@ -404,7 +406,6 @@
# shelve a shelved server.
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
- self.addCleanup(self.client.unshelve_server, self.server_id)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
@@ -425,6 +426,8 @@
self.client.shelve_server,
self.server_id)
+ self.client.unshelve_server(self.server_id)
+
@test.attr(type=['negative', 'gate'])
def test_unshelve_non_existent_server(self):
# unshelve a non existent server
diff --git a/tempest/api/compute/v3/test_live_block_migration.py b/tempest/api/compute/v3/test_live_block_migration.py
index 33d2bd9..6ca37e6 100644
--- a/tempest/api/compute/v3/test_live_block_migration.py
+++ b/tempest/api/compute/v3/test_live_block_migration.py
@@ -17,7 +17,7 @@
from tempest.api.compute import base
from tempest import config
-from tempest.test import attr
+from tempest import test
CONF = config.CONF
@@ -85,7 +85,7 @@
@testtools.skipIf(not CONF.compute_feature_enabled.live_migration,
'Live migration not available')
- @attr(type='gate')
+ @test.attr(type='gate')
def test_live_block_migration(self):
# Live block migrate an instance to another host
if len(self._get_compute_hostnames()) < 2:
@@ -105,7 +105,7 @@
@testtools.skipIf(not CONF.compute_feature_enabled.
block_migrate_cinder_iscsi,
'Block Live migration not configured for iSCSI')
- @attr(type='gate')
+ @test.attr(type='gate')
def test_iscsi_volume(self):
# Live block migrate an instance to another host
if len(self._get_compute_hostnames()) < 2:
diff --git a/tempest/api/compute/v3/test_quotas.py b/tempest/api/compute/v3/test_quotas.py
index 3fe62e9..62a7556 100644
--- a/tempest/api/compute/v3/test_quotas.py
+++ b/tempest/api/compute/v3/test_quotas.py
@@ -23,13 +23,8 @@
def setUpClass(cls):
super(QuotasV3Test, cls).setUpClass()
cls.client = cls.quotas_client
- cls.admin_client = cls._get_identity_admin_client()
- resp, tenants = cls.admin_client.list_tenants()
- cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
- cls.client.tenant_name][0]
- resp, users = cls.admin_client.list_users_for_tenant(cls.tenant_id)
- cls.user_id = [user['id'] for user in users if user['name'] ==
- cls.client.user][0]
+ cls.tenant_id = cls.client.tenant_id
+ cls.user_id = cls.client.user_id
cls.default_quota_set = set(('metadata_items',
'ram', 'floating_ips',
'fixed_ips', 'key_pairs',
diff --git a/tempest/api/compute/volumes/test_volumes_list.py b/tempest/api/compute/volumes/test_volumes_list.py
index 9867c64..25a8547 100644
--- a/tempest/api/compute/volumes/test_volumes_list.py
+++ b/tempest/api/compute/volumes/test_volumes_list.py
@@ -16,7 +16,7 @@
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
-from tempest.test import attr
+from tempest import test
CONF = config.CONF
@@ -75,7 +75,7 @@
cls.delete_volume(volume['id'])
super(VolumesTestJSON, cls).tearDownClass()
- @attr(type='gate')
+ @test.attr(type='gate')
def test_volume_list(self):
# Should return the list of Volumes
# Fetch all Volumes
@@ -91,7 +91,7 @@
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
- @attr(type='gate')
+ @test.attr(type='gate')
def test_volume_list_with_details(self):
# Should return the list of Volumes with details
# Fetch all Volumes
@@ -107,7 +107,7 @@
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
- @attr(type='gate')
+ @test.attr(type='gate')
def test_volume_list_param_limit(self):
# Return the list of volumes based on limit set
params = {'limit': 2}
@@ -117,7 +117,7 @@
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by limit set")
- @attr(type='gate')
+ @test.attr(type='gate')
def test_volume_list_with_detail_param_limit(self):
# Return the list of volumes with details based on limit set.
params = {'limit': 2}
@@ -128,7 +128,7 @@
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by limit set")
- @attr(type='gate')
+ @test.attr(type='gate')
def test_volume_list_param_offset_and_limit(self):
# Return the list of volumes based on offset and limit set.
# get all volumes list
@@ -146,7 +146,7 @@
all_vol_list[index + params['offset']]['id'],
"Failed to list volumes by offset and limit")
- @attr(type='gate')
+ @test.attr(type='gate')
def test_volume_list_with_detail_param_offset_and_limit(self):
# Return the list of volumes details based on offset and limit set.
# get all volumes list
diff --git a/tempest/api/compute/volumes/test_volumes_negative.py b/tempest/api/compute/volumes/test_volumes_negative.py
index cecaf62..5dfbad7 100644
--- a/tempest/api/compute/volumes/test_volumes_negative.py
+++ b/tempest/api/compute/volumes/test_volumes_negative.py
@@ -19,7 +19,7 @@
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
CONF = config.CONF
@@ -34,7 +34,7 @@
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_volume_get_nonexistent_volume_id(self):
# Negative: Should not be able to get details of nonexistent volume
# Creating a nonexistent volume id
@@ -42,7 +42,7 @@
self.assertRaises(exceptions.NotFound, self.client.get_volume,
str(uuid.uuid4()))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_volume_delete_nonexistent_volume_id(self):
# Negative: Should not be able to delete nonexistent Volume
# Creating nonexistent volume id
@@ -50,7 +50,7 @@
self.assertRaises(exceptions.NotFound, self.client.delete_volume,
str(uuid.uuid4()))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_volume_with_invalid_size(self):
# Negative: Should not be able to create volume with invalid size
# in request
@@ -59,7 +59,7 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='#$%', display_name=v_name, metadata=metadata)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_volume_with_out_passing_size(self):
# Negative: Should not be able to create volume without passing size
# in request
@@ -68,7 +68,7 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='', display_name=v_name, metadata=metadata)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_volume_with_size_zero(self):
# Negative: Should not be able to create volume with size zero
v_name = data_utils.rand_name('Volume-')
@@ -76,25 +76,25 @@
self.assertRaises(exceptions.BadRequest, self.client.create_volume,
size='0', display_name=v_name, metadata=metadata)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_invalid_volume_id(self):
# Negative: Should not be able to get volume with invalid id
self.assertRaises(exceptions.NotFound,
self.client.get_volume, '#$%%&^&^')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_volume_without_passing_volume_id(self):
# Negative: Should not be able to get volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.get_volume, '')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_delete_invalid_volume_id(self):
# Negative: Should not be able to delete volume when invalid ID is
# passed
self.assertRaises(exceptions.NotFound,
self.client.delete_volume, '!@#$%^&*()')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_delete_volume_without_passing_volume_id(self):
# Negative: Should not be able to delete volume when empty ID is passed
self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
diff --git a/tempest/api/data_processing/base.py b/tempest/api/data_processing/base.py
index fc313f2..cc76880 100644
--- a/tempest/api/data_processing/base.py
+++ b/tempest/api/data_processing/base.py
@@ -1,17 +1,16 @@
-# Copyright (c) 2013 Mirantis Inc.
+# Copyright (c) 2014 Mirantis Inc.
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
from tempest import config
from tempest import exceptions
@@ -38,6 +37,8 @@
# add lists for watched resources
cls._node_group_templates = []
cls._cluster_templates = []
+ cls._data_sources = []
+ cls._job_binary_internals = []
@classmethod
def tearDownClass(cls):
@@ -45,6 +46,10 @@
cls.client.delete_cluster_template)
cls.cleanup_resources(getattr(cls, '_node_group_templates', []),
cls.client.delete_node_group_template)
+ cls.cleanup_resources(getattr(cls, '_data_sources', []),
+ cls.client.delete_data_source)
+ cls.cleanup_resources(getattr(cls, '_job_binary_internals', []),
+ cls.client.delete_job_binary_internal)
cls.clear_isolated_creds()
super(BaseDataProcessingTest, cls).tearDownClass()
@@ -96,3 +101,30 @@
cls._cluster_templates.append(body['id'])
return resp, body
+
+ @classmethod
+ def create_data_source(cls, name, type, url, **kwargs):
+ """Creates watched data source with specified params.
+
+ It supports passing additional params using kwargs and returns created
+ object. All resources created in this method will be automatically
+ removed in tearDownClass method.
+ """
+ resp, body = cls.client.create_data_source(name, type, url, **kwargs)
+ # store id of created data source
+ cls._data_sources.append(body['id'])
+
+ return resp, body
+
+ @classmethod
+ def create_job_binary_internal(cls, name, data):
+ """Creates watched job binary internal with specified params.
+
+ It returns created object. All resources created in this method will
+ be automatically removed in tearDownClass method.
+ """
+ resp, body = cls.client.create_job_binary_internal(name, data)
+ # store id of created job binary internal
+ cls._job_binary_internals.append(body['id'])
+
+ return resp, body
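A brief usage sketch of the two new base-class helpers, assuming a subclass of BaseDataProcessingTest (the test name and payload values are illustrative only):

    class ExampleSaharaResourcesTest(BaseDataProcessingTest):

        def test_tracked_resources(self):
            # both helpers record the created id so that tearDownClass
            # deletes the resources automatically
            resp, source = self.create_data_source(
                'example-source', 'hdfs', 'hdfs://example/input-source')
            resp, binary = self.create_job_binary_internal(
                'example-binary', 'example job binary data')
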
diff --git a/tempest/api/data_processing/test_data_sources.py b/tempest/api/data_processing/test_data_sources.py
new file mode 100644
index 0000000..c72e828
--- /dev/null
+++ b/tempest/api/data_processing/test_data_sources.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2014 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.data_processing import base as dp_base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class DataSourceTest(dp_base.BaseDataProcessingTest):
+ @classmethod
+ def setUpClass(cls):
+ super(DataSourceTest, cls).setUpClass()
+ cls.swift_data_source_with_creds = {
+ 'url': 'swift://sahara-container.sahara/input-source',
+ 'description': 'Test data source',
+ 'credentials': {
+ 'user': CONF.identity.username,
+ 'password': CONF.identity.password
+ },
+ 'type': 'swift'
+ }
+ cls.swift_data_source = cls.swift_data_source_with_creds.copy()
+ del cls.swift_data_source['credentials']
+
+ cls.local_hdfs_data_source = {
+ 'url': 'input-source',
+ 'description': 'Test data source',
+ 'type': 'hdfs'
+ }
+
+ cls.external_hdfs_data_source = {
+ 'url': 'hdfs://172.18.168.2:8020/usr/hadoop/input-source',
+ 'description': 'Test data source',
+ 'type': 'hdfs'
+ }
+
+ def _create_data_source(self, source_body, source_name=None):
+ """Creates Data Source with optional name specified.
+
+ It creates a link to an input-source file (which may not exist) and checks
+ the response status and source name. Returns the created source id and name.
+ """
+ if not source_name:
+ # generate random name if it's not specified
+ source_name = data_utils.rand_name('sahara-data-source')
+
+ # create data source
+ resp, body = self.create_data_source(source_name, **source_body)
+
+ # ensure that source created successfully
+ self.assertEqual(202, resp.status)
+ self.assertEqual(source_name, body['name'])
+ if source_body['type'] == 'swift':
+ source_body = self.swift_data_source
+ self.assertDictContainsSubset(source_body, body)
+
+ return body['id'], source_name
+
+ def _list_data_sources(self, source_info):
+ # check for data source in list
+ resp, sources = self.client.list_data_sources()
+ self.assertEqual(200, resp.status)
+ sources_info = [(source['id'], source['name']) for source in sources]
+ self.assertIn(source_info, sources_info)
+
+ def _get_data_source(self, source_id, source_name, source_body):
+ # check data source fetch by id
+ resp, source = self.client.get_data_source(source_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(source_name, source['name'])
+ self.assertDictContainsSubset(source_body, source)
+
+ def _delete_data_source(self, source_id):
+ # delete the data source by id
+ resp = self.client.delete_data_source(source_id)[0]
+ self.assertEqual(204, resp.status)
+
+ @test.attr(type='smoke')
+ def test_swift_data_source_create(self):
+ self._create_data_source(self.swift_data_source_with_creds)
+
+ @test.attr(type='smoke')
+ def test_swift_data_source_list(self):
+ source_info = self._create_data_source(
+ self.swift_data_source_with_creds)
+ self._list_data_sources(source_info)
+
+ @test.attr(type='smoke')
+ def test_swift_data_source_get(self):
+ source_id, source_name = self._create_data_source(
+ self.swift_data_source_with_creds)
+ self._get_data_source(source_id, source_name, self.swift_data_source)
+
+ @test.attr(type='smoke')
+ def test_swift_data_source_delete(self):
+ source_id = self._create_data_source(
+ self.swift_data_source_with_creds)[0]
+ self._delete_data_source(source_id)
+
+ @test.attr(type='smoke')
+ def test_local_hdfs_data_source_create(self):
+ self._create_data_source(self.local_hdfs_data_source)
+
+ @test.attr(type='smoke')
+ def test_local_hdfs_data_source_list(self):
+ source_info = self._create_data_source(self.local_hdfs_data_source)
+ self._list_data_sources(source_info)
+
+ @test.attr(type='smoke')
+ def test_local_hdfs_data_source_get(self):
+ source_id, source_name = self._create_data_source(
+ self.local_hdfs_data_source)
+ self._get_data_source(
+ source_id, source_name, self.local_hdfs_data_source)
+
+ @test.attr(type='smoke')
+ def test_local_hdfs_data_source_delete(self):
+ source_id = self._create_data_source(self.local_hdfs_data_source)[0]
+ self._delete_data_source(source_id)
+
+ @test.attr(type='smoke')
+ def test_external_hdfs_data_source_create(self):
+ self._create_data_source(self.external_hdfs_data_source)
+
+ @test.attr(type='smoke')
+ def test_external_hdfs_data_source_list(self):
+ source_info = self._create_data_source(self.external_hdfs_data_source)
+ self._list_data_sources(source_info)
+
+ @test.attr(type='smoke')
+ def test_external_hdfs_data_source_get(self):
+ source_id, source_name = self._create_data_source(
+ self.external_hdfs_data_source)
+ self._get_data_source(
+ source_id, source_name, self.external_hdfs_data_source)
+
+ @test.attr(type='smoke')
+ def test_external_hdfs_data_source_delete(self):
+ source_id = self._create_data_source(self.external_hdfs_data_source)[0]
+ self._delete_data_source(source_id)
diff --git a/tempest/api/data_processing/test_node_group_templates.py b/tempest/api/data_processing/test_node_group_templates.py
index ed4cf1f..04f98b4 100644
--- a/tempest/api/data_processing/test_node_group_templates.py
+++ b/tempest/api/data_processing/test_node_group_templates.py
@@ -1,21 +1,20 @@
-# Copyright (c) 2013 Mirantis Inc.
+# Copyright (c) 2014 Mirantis Inc.
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
from tempest.api.data_processing import base as dp_base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class NodeGroupTemplateTest(dp_base.BaseDataProcessingTest):
@@ -47,7 +46,7 @@
It creates template and ensures response status and template name.
Returns id and name of created template.
"""
- if template_name is None:
+ if not template_name:
# generate random name if it's not specified
template_name = data_utils.rand_name('sahara-ng-template')
@@ -58,48 +57,39 @@
# ensure that template created successfully
self.assertEqual(202, resp.status)
self.assertEqual(template_name, body['name'])
+ self.assertDictContainsSubset(self.node_group_template, body)
return body['id'], template_name
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_node_group_template_create(self):
- template_name = data_utils.rand_name('sahara-ng-template')
- resp, body = self.create_node_group_template(
- template_name, **self.node_group_template)
+ self._create_node_group_template()
- # check that template created successfully
- self.assertEqual(resp.status, 202)
- self.assertEqual(template_name, body['name'])
- self.assertDictContainsSubset(self.node_group_template, body)
-
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_node_group_template_list(self):
template_info = self._create_node_group_template()
# check for node group template in list
resp, templates = self.client.list_node_group_templates()
-
self.assertEqual(200, resp.status)
templates_info = [(template['id'], template['name'])
for template in templates]
self.assertIn(template_info, templates_info)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_node_group_template_get(self):
template_id, template_name = self._create_node_group_template()
# check node group template fetch by id
resp, template = self.client.get_node_group_template(template_id)
-
self.assertEqual(200, resp.status)
self.assertEqual(template_name, template['name'])
self.assertDictContainsSubset(self.node_group_template, template)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_node_group_template_delete(self):
template_id = self._create_node_group_template()[0]
# delete the node group template by id
resp = self.client.delete_node_group_template(template_id)[0]
-
self.assertEqual(204, resp.status)
diff --git a/tempest/api/data_processing/test_plugins.py b/tempest/api/data_processing/test_plugins.py
index 3b941d8..d643f23 100644
--- a/tempest/api/data_processing/test_plugins.py
+++ b/tempest/api/data_processing/test_plugins.py
@@ -1,20 +1,19 @@
-# Copyright (c) 2013 Mirantis Inc.
+# Copyright (c) 2014 Mirantis Inc.
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
from tempest.api.data_processing import base as dp_base
-from tempest.test import attr
+from tempest import test
class PluginsTest(dp_base.BaseDataProcessingTest):
@@ -24,31 +23,27 @@
It ensures response status and main plugins availability.
"""
resp, plugins = self.client.list_plugins()
-
self.assertEqual(200, resp.status)
-
- plugins_names = list([plugin['name'] for plugin in plugins])
+ plugins_names = [plugin['name'] for plugin in plugins]
self.assertIn('vanilla', plugins_names)
self.assertIn('hdp', plugins_names)
return plugins_names
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_plugin_list(self):
self._list_all_plugin_names()
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_plugin_get(self):
for plugin_name in self._list_all_plugin_names():
resp, plugin = self.client.get_plugin(plugin_name)
-
self.assertEqual(200, resp.status)
self.assertEqual(plugin_name, plugin['name'])
for plugin_version in plugin['versions']:
resp, detailed_plugin = self.client.get_plugin(plugin_name,
plugin_version)
-
self.assertEqual(200, resp.status)
self.assertEqual(plugin_name, detailed_plugin['name'])
diff --git a/tempest/api/database/base.py b/tempest/api/database/base.py
index 8add9ba..cf70d11 100644
--- a/tempest/api/database/base.py
+++ b/tempest/api/database/base.py
@@ -36,7 +36,9 @@
cls.catalog_type = CONF.database.catalog_type
cls.db_flavor_ref = CONF.database.db_flavor_ref
+ cls.db_current_version = CONF.database.db_current_version
os = cls.get_client_manager()
cls.os = os
cls.database_flavors_client = cls.os.database_flavors_client
+ cls.database_versions_client = cls.os.database_versions_client
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/api/database/versions/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/api/database/versions/__init__.py
diff --git a/tempest/api/database/versions/test_versions.py b/tempest/api/database/versions/test_versions.py
new file mode 100644
index 0000000..6101f47
--- /dev/null
+++ b/tempest/api/database/versions/test_versions.py
@@ -0,0 +1,40 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.database import base
+from tempest import test
+
+
+class DatabaseVersionsTest(base.BaseDatabaseTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ super(DatabaseVersionsTest, cls).setUpClass()
+ cls.client = cls.database_versions_client
+
+ @test.attr(type='smoke')
+ def test_list_db_versions(self):
+ resp, versions = self.client.list_db_versions()
+ self.assertEqual(200, resp.status)
+ self.assertTrue(len(versions) > 0, "No database versions found")
+ # List of all versions should contain the current version, and there
+ # should only be one 'current' version
+ current_versions = list()
+ for version in versions:
+ if 'CURRENT' == version['status']:
+ current_versions.append(version['id'])
+ self.assertEqual(1, len(current_versions))
+ self.assertIn(self.db_current_version, current_versions)
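The CURRENT-version filter in test_list_db_versions can also be expressed as a list comprehension; an equivalent sketch (not part of the patch):

    current_versions = [version['id'] for version in versions
                        if version['status'] == 'CURRENT']
    self.assertEqual(1, len(current_versions))
    self.assertIn(self.db_current_version, current_versions)
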
diff --git a/tempest/api/identity/admin/test_roles_negative.py b/tempest/api/identity/admin/test_roles_negative.py
index d311143..6f8f9b5 100644
--- a/tempest/api/identity/admin/test_roles_negative.py
+++ b/tempest/api/identity/admin/test_roles_negative.py
@@ -18,7 +18,7 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class RolesNegativeTestJSON(base.BaseIdentityV2AdminTest):
@@ -32,13 +32,13 @@
role = self.get_role_by_name(self.data.test_role)
return (user, tenant, role)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_roles_by_unauthorized_user(self):
# Non-administrator user should not be able to list roles
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_roles)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_roles_request_without_token(self):
# Request to list roles without a valid token should fail
token = self.client.auth_provider.get_token()
@@ -46,19 +46,19 @@
self.assertRaises(exceptions.Unauthorized, self.client.list_roles)
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_role_create_blank_name(self):
# Should not be able to create a role with a blank name
self.assertRaises(exceptions.BadRequest, self.client.create_role, '')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_role_by_unauthorized_user(self):
# Non-administrator user should not be able to create role
role_name = data_utils.rand_name(name='role-')
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_role, role_name)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_role_request_without_token(self):
# Request to create role without a valid token should fail
token = self.client.auth_provider.get_token()
@@ -68,7 +68,7 @@
self.client.create_role, role_name)
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_role_create_duplicate(self):
# Role names should be unique
role_name = data_utils.rand_name(name='role-dup-')
@@ -79,7 +79,7 @@
self.assertRaises(exceptions.Conflict, self.client.create_role,
role_name)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_delete_role_by_unauthorized_user(self):
# Non-administrator user should not be able to delete role
role_name = data_utils.rand_name(name='role-')
@@ -90,7 +90,7 @@
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.delete_role, role_id)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_delete_role_request_without_token(self):
# Request to delete role without a valid token should fail
role_name = data_utils.rand_name(name='role-')
@@ -105,14 +105,14 @@
role_id)
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_delete_role_non_existent(self):
# Attempt to delete a non existent role should fail
non_existent_role = str(uuid.uuid4().hex)
self.assertRaises(exceptions.NotFound, self.client.delete_role,
non_existent_role)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_assign_user_role_by_unauthorized_user(self):
# Non-administrator user should not be authorized to
# assign a role to user
@@ -121,7 +121,7 @@
self.non_admin_client.assign_user_role,
tenant['id'], user['id'], role['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_assign_user_role_request_without_token(self):
# Request to assign a role to a user without a valid token
(user, tenant, role) = self._get_role_params()
@@ -132,7 +132,7 @@
user['id'], role['id'])
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_assign_user_role_for_non_existent_role(self):
# Attempt to assign a non existent role to user should fail
(user, tenant, role) = self._get_role_params()
@@ -140,7 +140,7 @@
self.assertRaises(exceptions.NotFound, self.client.assign_user_role,
tenant['id'], user['id'], non_existent_role)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_assign_user_role_for_non_existent_tenant(self):
# Attempt to assign a role on a non existent tenant should fail
(user, tenant, role) = self._get_role_params()
@@ -148,7 +148,7 @@
self.assertRaises(exceptions.NotFound, self.client.assign_user_role,
non_existent_tenant, user['id'], role['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_assign_duplicate_user_role(self):
# Duplicate user role should not get assigned
(user, tenant, role) = self._get_role_params()
@@ -156,7 +156,7 @@
self.assertRaises(exceptions.Conflict, self.client.assign_user_role,
tenant['id'], user['id'], role['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_remove_user_role_by_unauthorized_user(self):
# Non-administrator user should not be authorized to
# remove a user's role
@@ -168,7 +168,7 @@
self.non_admin_client.remove_user_role,
tenant['id'], user['id'], role['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_remove_user_role_request_without_token(self):
# Request to remove a user's role without a valid token
(user, tenant, role) = self._get_role_params()
@@ -182,7 +182,7 @@
user['id'], role['id'])
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_remove_user_role_non_existent_role(self):
# Attempt to delete a non existent role from a user should fail
(user, tenant, role) = self._get_role_params()
@@ -193,7 +193,7 @@
self.assertRaises(exceptions.NotFound, self.client.remove_user_role,
tenant['id'], user['id'], non_existent_role)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_remove_user_role_non_existent_tenant(self):
# Attempt to remove a role from a non existent tenant should fail
(user, tenant, role) = self._get_role_params()
@@ -204,7 +204,7 @@
self.assertRaises(exceptions.NotFound, self.client.remove_user_role,
non_existent_tenant, user['id'], role['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_user_roles_by_unauthorized_user(self):
# Non-administrator user should not be authorized to list
# a user's roles
@@ -214,7 +214,7 @@
self.non_admin_client.list_user_roles, tenant['id'],
user['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_user_roles_request_without_token(self):
# Request to list user's roles without a valid token should fail
(user, tenant, role) = self._get_role_params()
diff --git a/tempest/api/identity/admin/test_services.py b/tempest/api/identity/admin/test_services.py
index e5cb348..0472e07 100644
--- a/tempest/api/identity/admin/test_services.py
+++ b/tempest/api/identity/admin/test_services.py
@@ -18,7 +18,7 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class ServicesTestJSON(base.BaseIdentityV2AdminTest):
@@ -32,7 +32,7 @@
self.assertRaises(exceptions.NotFound, self.client.get_service,
service_id)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_create_get_delete_service(self):
# GET Service
# Creating a Service
@@ -66,7 +66,7 @@
self.assertEqual(fetched_service['description'],
service_data['description'])
- @attr(type='gate')
+ @test.attr(type='gate')
def test_create_service_without_description(self):
# Create a service only with name and type
name = data_utils.rand_name('service-')
@@ -80,7 +80,7 @@
self.assertIn('type', service)
self.assertEqual(type, service['type'])
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_services(self):
# Create, List, Verify and Delete Services
services = []
diff --git a/tempest/api/identity/admin/test_tenant_negative.py b/tempest/api/identity/admin/test_tenant_negative.py
index 44b54b8..622ad81 100644
--- a/tempest/api/identity/admin/test_tenant_negative.py
+++ b/tempest/api/identity/admin/test_tenant_negative.py
@@ -18,19 +18,19 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class TenantsNegativeTestJSON(base.BaseIdentityV2AdminTest):
_interface = 'json'
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_tenants_by_unauthorized_user(self):
# Non-administrator user should not be able to list tenants
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.list_tenants)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_tenant_request_without_token(self):
# Request to list tenants without a valid token should fail
token = self.client.auth_provider.get_token()
@@ -38,7 +38,7 @@
self.assertRaises(exceptions.Unauthorized, self.client.list_tenants)
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_tenant_delete_by_unauthorized_user(self):
# Non-administrator user should not be able to delete a tenant
tenant_name = data_utils.rand_name(name='tenant-')
@@ -48,7 +48,7 @@
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.delete_tenant, tenant['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_tenant_delete_request_without_token(self):
# Request to delete a tenant without a valid token should fail
tenant_name = data_utils.rand_name(name='tenant-')
@@ -61,13 +61,13 @@
tenant['id'])
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_delete_non_existent_tenant(self):
# Attempt to delete a non existent tenant should fail
self.assertRaises(exceptions.NotFound, self.client.delete_tenant,
str(uuid.uuid4().hex))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_tenant_create_duplicate(self):
# Tenant names should be unique
tenant_name = data_utils.rand_name(name='tenant-')
@@ -82,14 +82,14 @@
self.assertRaises(exceptions.Conflict, self.client.create_tenant,
tenant_name)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_tenant_by_unauthorized_user(self):
# Non-administrator user should not be authorized to create a tenant
tenant_name = data_utils.rand_name(name='tenant-')
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.create_tenant, tenant_name)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_tenant_request_without_token(self):
# Create tenant request without a token should not be authorized
tenant_name = data_utils.rand_name(name='tenant-')
@@ -99,26 +99,26 @@
tenant_name)
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_tenant_with_empty_name(self):
# Tenant name should not be empty
self.assertRaises(exceptions.BadRequest, self.client.create_tenant,
name='')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_tenants_name_length_over_64(self):
# Tenant name length should not be greater than 64 characters
tenant_name = 'a' * 65
self.assertRaises(exceptions.BadRequest, self.client.create_tenant,
tenant_name)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_update_non_existent_tenant(self):
# Attempt to update a non existent tenant should fail
self.assertRaises(exceptions.NotFound, self.client.update_tenant,
str(uuid.uuid4().hex))
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_tenant_update_by_unauthorized_user(self):
# Non-administrator user should not be able to update a tenant
tenant_name = data_utils.rand_name(name='tenant-')
@@ -128,7 +128,7 @@
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.update_tenant, tenant['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_tenant_update_request_without_token(self):
# Request to update a tenant without a valid token should fail
tenant_name = data_utils.rand_name(name='tenant-')
diff --git a/tempest/api/identity/admin/test_tenants.py b/tempest/api/identity/admin/test_tenants.py
index 7ba46bb..b989664 100644
--- a/tempest/api/identity/admin/test_tenants.py
+++ b/tempest/api/identity/admin/test_tenants.py
@@ -17,13 +17,13 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class TenantsTestJSON(base.BaseIdentityV2AdminTest):
_interface = 'json'
- @attr(type='gate')
+ @test.attr(type='gate')
def test_tenant_list_delete(self):
# Create several tenants and delete them
tenants = []
@@ -48,7 +48,7 @@
found = [tenant for tenant in body if tenant['id'] in tenant_ids]
self.assertFalse(any(found), 'Tenants failed to delete')
- @attr(type='gate')
+ @test.attr(type='gate')
def test_tenant_create_with_description(self):
# Create tenant with a description
tenant_name = data_utils.rand_name(name='tenant-')
@@ -69,7 +69,7 @@
self.client.delete_tenant(tenant_id)
self.data.tenants.remove(tenant)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_tenant_create_enabled(self):
# Create a tenant that is enabled
tenant_name = data_utils.rand_name(name='tenant-')
@@ -86,7 +86,7 @@
self.client.delete_tenant(tenant_id)
self.data.tenants.remove(tenant)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_tenant_create_not_enabled(self):
# Create a tenant that is not enabled
tenant_name = data_utils.rand_name(name='tenant-')
@@ -105,7 +105,7 @@
self.client.delete_tenant(tenant_id)
self.data.tenants.remove(tenant)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_tenant_update_name(self):
# Update name attribute of a tenant
t_name1 = data_utils.rand_name(name='tenant-')
@@ -133,7 +133,7 @@
self.client.delete_tenant(t_id)
self.data.tenants.remove(tenant)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_tenant_update_desc(self):
# Update description attribute of a tenant
t_name = data_utils.rand_name(name='tenant-')
@@ -162,7 +162,7 @@
self.client.delete_tenant(t_id)
self.data.tenants.remove(tenant)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_tenant_update_enable(self):
# Update the enabled attribute of a tenant
t_name = data_utils.rand_name(name='tenant-')
diff --git a/tempest/api/identity/admin/test_tokens.py b/tempest/api/identity/admin/test_tokens.py
index 7fec28d..08e12f0 100644
--- a/tempest/api/identity/admin/test_tokens.py
+++ b/tempest/api/identity/admin/test_tokens.py
@@ -15,13 +15,13 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class TokensTestJSON(base.BaseIdentityV2AdminTest):
_interface = 'json'
- @attr(type='gate')
+ @test.attr(type='gate')
def test_create_get_delete_token(self):
# get a token by username and password
user_name = data_utils.rand_name(name='user-')
@@ -56,7 +56,7 @@
resp, body = self.client.delete_token(token_id)
self.assertEqual(resp['status'], '204')
- @attr(type='gate')
+ @test.attr(type='gate')
def test_rescope_token(self):
"""An unscoped token can be requested, that token can be used to
request a scoped token.
diff --git a/tempest/api/identity/admin/test_users.py b/tempest/api/identity/admin/test_users.py
index a4e6c17..e2c1066 100644
--- a/tempest/api/identity/admin/test_users.py
+++ b/tempest/api/identity/admin/test_users.py
@@ -206,6 +206,25 @@
"Failed to find user %s in fetched list" %
', '.join(m_user for m_user in missing_users))
+ @test.attr(type='smoke')
+ def test_update_user_password(self):
+ # Test case to check if updating of user password is successful.
+ self.data.setup_test_user()
+ # Updating the user with new password
+ new_pass = data_utils.rand_name('pass-')
+ resp, update_user = self.client.update_user_password(
+ self.data.user['id'], new_pass)
+ # Assert response body of update user.
+ self.assertEqual(200, resp.status)
+ self.assertEqual(update_user['id'], self.data.user['id'])
+
+ # Validate the updated password
+ # Get a token
+ resp, body = self.token_client.auth(self.data.test_user, new_pass,
+ self.data.test_tenant)
+ self.assertEqual('200', resp['status'])
+ self.assertTrue('id' in body['token'])
+
class UsersTestXML(UsersTestJSON):
_interface = 'xml'
diff --git a/tempest/api/identity/admin/test_users_negative.py b/tempest/api/identity/admin/test_users_negative.py
index 4e8ebe5..a584a7b 100644
--- a/tempest/api/identity/admin/test_users_negative.py
+++ b/tempest/api/identity/admin/test_users_negative.py
@@ -18,7 +18,7 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class UsersNegativeTestJSON(base.BaseIdentityV2AdminTest):
@@ -31,7 +31,7 @@
cls.alt_password = data_utils.rand_name('pass_')
cls.alt_email = cls.alt_user + '@testmail.tm'
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_user_by_unauthorized_user(self):
# Non-administrator should not be authorized to create a user
self.data.setup_test_tenant()
@@ -40,7 +40,7 @@
self.alt_password, self.data.tenant['id'],
self.alt_email)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_user_with_empty_name(self):
# User with an empty name should not be created
self.data.setup_test_tenant()
@@ -48,7 +48,7 @@
self.alt_password, self.data.tenant['id'],
self.alt_email)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_user_with_name_length_over_255(self):
# Length of user name field should be restricted to 255 characters
self.data.setup_test_tenant()
@@ -56,7 +56,7 @@
'a' * 256, self.alt_password,
self.data.tenant['id'], self.alt_email)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_user_with_duplicate_name(self):
# Duplicate user should not be created
self.data.setup_test_user()
@@ -64,14 +64,14 @@
self.data.test_user, self.data.test_password,
self.data.tenant['id'], self.data.test_email)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_user_for_non_existent_tenant(self):
# Attempt to create a user in a non-existent tenant should fail
self.assertRaises(exceptions.NotFound, self.client.create_user,
self.alt_user, self.alt_password, '49ffgg99999',
self.alt_email)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_user_request_without_a_token(self):
# Request to create a user without a valid token should fail
self.data.setup_test_tenant()
@@ -86,7 +86,7 @@
# Unset the token to allow further tests to generate a new token
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_user_with_enabled_non_bool(self):
# Attempt to create a user with an invalid enabled parameter should fail
self.data.setup_test_tenant()
@@ -96,7 +96,7 @@
self.data.tenant['id'],
self.alt_email, enabled=3)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_update_user_for_non_existent_user(self):
# Attempt to update a non-existent user should fail
user_name = data_utils.rand_name('user-')
@@ -104,7 +104,7 @@
self.assertRaises(exceptions.NotFound, self.client.update_user,
non_existent_id, name=user_name)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_update_user_request_without_a_token(self):
# Request to update a user without a valid token should fail
@@ -118,14 +118,14 @@
# Unset the token to allow further tests to generate a new token
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_update_user_by_unauthorized_user(self):
# Non-administrator should not be authorized to update user
self.data.setup_test_tenant()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.update_user, self.alt_user)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_delete_users_by_unauthorized_user(self):
# Non-administrator user should not be authorized to delete a user
self.data.setup_test_user()
@@ -133,13 +133,13 @@
self.non_admin_client.delete_user,
self.data.user['id'])
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_delete_non_existent_user(self):
# Attempt to delete a non-existent user should fail
self.assertRaises(exceptions.NotFound, self.client.delete_user,
'junk12345123')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_delete_user_request_without_a_token(self):
# Request to delete a user without a valid token should fail
@@ -153,7 +153,7 @@
# Unset the token to allow further tests to generate a new token
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_authentication_for_disabled_user(self):
# Disabled user's token should not get authenticated
self.data.setup_test_user()
@@ -163,7 +163,7 @@
self.data.test_password,
self.data.test_tenant)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_authentication_when_tenant_is_disabled(self):
# User's token for a disabled tenant should not be authenticated
self.data.setup_test_user()
@@ -173,7 +173,7 @@
self.data.test_password,
self.data.test_tenant)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_authentication_with_invalid_tenant(self):
# User's token for an invalid tenant should not be authenticated
self.data.setup_test_user()
@@ -182,7 +182,7 @@
self.data.test_password,
'junktenant1234')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_authentication_with_invalid_username(self):
# Non-existent user's token should not get authenticated
self.data.setup_test_user()
@@ -190,7 +190,7 @@
'junkuser123', self.data.test_password,
self.data.test_tenant)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_authentication_with_invalid_password(self):
# User's token with invalid password should not be authenticated
self.data.setup_test_user()
@@ -198,14 +198,14 @@
self.data.test_user, 'junkpass1234',
self.data.test_tenant)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_users_by_unauthorized_user(self):
# Non-administrator user should not be authorized to get user list
self.data.setup_test_user()
self.assertRaises(exceptions.Unauthorized,
self.non_admin_client.get_users)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_get_users_request_without_token(self):
# Request to get list of users without a valid token should fail
token = self.client.auth_provider.get_token()
@@ -213,7 +213,7 @@
self.assertRaises(exceptions.Unauthorized, self.client.get_users)
self.client.auth_provider.clear_auth()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_users_with_invalid_tenant(self):
# Should not be able to return a list of all
# users for a non-existent tenant
diff --git a/tempest/api/identity/admin/v3/test_domains.py b/tempest/api/identity/admin/v3/test_domains.py
index 086d235..a1e6cde 100644
--- a/tempest/api/identity/admin/v3/test_domains.py
+++ b/tempest/api/identity/admin/v3/test_domains.py
@@ -16,7 +16,7 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class DomainsTestJSON(base.BaseIdentityV3AdminTest):
@@ -29,7 +29,7 @@
resp, _ = self.client.delete_domain(domain_id)
self.assertEqual(204, resp.status)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_domains(self):
# Test to list domains
domain_ids = list()
@@ -49,7 +49,7 @@
missing_doms = [d for d in domain_ids if d not in fetched_ids]
self.assertEqual(0, len(missing_doms))
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_create_update_delete_domain(self):
d_name = data_utils.rand_name('domain-')
d_desc = data_utils.rand_name('domain-desc-')
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 28615a4..5b46f89 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -18,7 +18,7 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class EndpointsNegativeTestJSON(base.BaseIdentityV3AdminTest):
@@ -45,7 +45,7 @@
cls.service_client.delete_service(s)
super(EndpointsNegativeTestJSON, cls).tearDownClass()
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_with_enabled_False(self):
# Enabled should be a boolean, not a string like 'False'
interface = 'public'
@@ -55,7 +55,7 @@
self.service_id, interface, url, region=region,
force_enabled='False')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_create_with_enabled_True(self):
# Enabled should be a boolean, not a string like 'True'
interface = 'public'
@@ -79,12 +79,12 @@
self.assertRaises(exceptions.BadRequest, self.client.update_endpoint,
endpoint_for_update['id'], force_enabled=enabled)
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_update_with_enabled_False(self):
# Enabled should be a boolean, not a string like 'False'
self._assert_update_raises_bad_request('False')
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_update_with_enabled_True(self):
# Enabled should be a boolean, not a string like 'True'
self._assert_update_raises_bad_request('True')
diff --git a/tempest/api/identity/admin/v3/test_policies.py b/tempest/api/identity/admin/v3/test_policies.py
index 3e04b5f..0e79440 100644
--- a/tempest/api/identity/admin/v3/test_policies.py
+++ b/tempest/api/identity/admin/v3/test_policies.py
@@ -15,7 +15,7 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class PoliciesTestJSON(base.BaseIdentityV3AdminTest):
@@ -25,7 +25,7 @@
resp, _ = self.policy_client.delete_policy(policy_id)
self.assertEqual(204, resp.status)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_list_policies(self):
# Test to list policies
policy_ids = list()
@@ -46,7 +46,7 @@
missing_pols = [p for p in policy_ids if p not in fetched_ids]
self.assertEqual(0, len(missing_pols))
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_create_update_delete_policy(self):
# Test to update policy
blob = data_utils.rand_name('BlobName-')
diff --git a/tempest/api/identity/admin/v3/test_regions.py b/tempest/api/identity/admin/v3/test_regions.py
new file mode 100644
index 0000000..03974e4
--- /dev/null
+++ b/tempest/api/identity/admin/v3/test_regions.py
@@ -0,0 +1,102 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.identity import base
+from tempest.common.utils import data_utils
+from tempest import exceptions
+from tempest import test
+
+
+class RegionsTestJSON(base.BaseIdentityV3AdminTest):
+ _interface = 'json'
+
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(RegionsTestJSON, cls).setUpClass()
+ cls.setup_regions = list()
+ cls.client = cls.region_client
+ for i in range(2):
+ r_description = data_utils.rand_name('description-')
+ _, region = cls.client.create_region(r_description)
+ cls.setup_regions.append(region)
+
+ @classmethod
+ def tearDownClass(cls):
+ for r in cls.setup_regions:
+ cls.client.delete_region(r['id'])
+ super(RegionsTestJSON, cls).tearDownClass()
+
+ def _delete_region(self, region_id):
+ resp, _ = self.client.delete_region(region_id)
+ self.assertEqual(204, resp.status)
+ self.assertRaises(exceptions.NotFound,
+ self.client.get_region, region_id)
+
+ @test.attr(type='gate')
+ def test_create_update_get_delete_region(self):
+ r_description = data_utils.rand_name('description-')
+ resp, region = self.client.create_region(
+ r_description, parent_region_id=self.setup_regions[0]['id'])
+ self.assertEqual(201, resp.status)
+ self.addCleanup(self._delete_region, region['id'])
+ self.assertEqual(r_description, region['description'])
+ self.assertEqual(self.setup_regions[0]['id'],
+ region['parent_region_id'])
+ # Update region with new description and parent ID
+ r_alt_description = data_utils.rand_name('description-')
+ resp, region = self.client.update_region(
+ region['id'],
+ description=r_alt_description,
+ parent_region_id=self.setup_regions[1]['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(r_alt_description, region['description'])
+ self.assertEqual(self.setup_regions[1]['id'],
+ region['parent_region_id'])
+ # Get the details of region
+ resp, region = self.client.get_region(region['id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(r_alt_description, region['description'])
+ self.assertEqual(self.setup_regions[1]['id'],
+ region['parent_region_id'])
+
+ @test.attr(type='smoke')
+ def test_create_region_with_specific_id(self):
+ # Create a region with a specific id
+ r_region_id = data_utils.rand_uuid()
+ r_description = data_utils.rand_name('description-')
+ resp, region = self.client.create_region(
+ r_description, unique_region_id=r_region_id)
+ self.addCleanup(self._delete_region, region['id'])
+ # Asserting Create Region with specific id response body
+ self.assertEqual(201, resp.status)
+ self.assertEqual(r_region_id, region['id'])
+ self.assertEqual(r_description, region['description'])
+
+ @test.attr(type='gate')
+ def test_list_regions(self):
+ # Get a list of regions
+ resp, fetched_regions = self.client.list_regions()
+ self.assertEqual(200, resp.status)
+ missing_regions =\
+ [e for e in self.setup_regions if e not in fetched_regions]
+ # Asserting List Regions response
+ self.assertEqual(0, len(missing_regions),
+ "Failed to find region %s in fetched list" %
+ ', '.join(str(e) for e in missing_regions))
+
+
+class RegionsTestXML(RegionsTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/identity/admin/v3/test_services.py b/tempest/api/identity/admin/v3/test_services.py
index c5d4ddf..36e5327 100644
--- a/tempest/api/identity/admin/v3/test_services.py
+++ b/tempest/api/identity/admin/v3/test_services.py
@@ -16,13 +16,13 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class ServicesTestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
- @attr(type='gate')
+ @test.attr(type='gate')
def test_update_service(self):
# Update description attribute of service
name = data_utils.rand_name('service-')
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index ebc1cac..fe3eb03 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -16,13 +16,13 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class TokensV3TestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_tokens(self):
# Valid user's token is authenticated
# Create a User
@@ -50,7 +50,7 @@
self.assertRaises(exceptions.NotFound, self.client.get_token,
subject_token)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_rescope_token(self):
"""Rescope a token.
diff --git a/tempest/api/identity/admin/v3/test_trusts.py b/tempest/api/identity/admin/v3/test_trusts.py
index cae20ad..8e3a7d1 100644
--- a/tempest/api/identity/admin/v3/test_trusts.py
+++ b/tempest/api/identity/admin/v3/test_trusts.py
@@ -13,6 +13,7 @@
import datetime
import re
from tempest.api.identity import base
+from tempest import auth
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
@@ -88,10 +89,13 @@
self.assertIsNotNone(self.trustee_user_id)
# Initialize a new client with the trustor credentials
- os = clients.Manager(username=self.trustor_username,
- password=self.trustor_password,
- tenant_name=self.trustor_project_name,
- interface=self._interface)
+ creds = auth.get_credentials(
+ username=self.trustor_username,
+ password=self.trustor_password,
+ tenant_name=self.trustor_project_name)
+ os = clients.Manager(
+ credentials=creds,
+ interface=self._interface)
self.trustor_client = os.identity_v3_client
def cleanup_user_and_roles(self):
diff --git a/tempest/api/identity/admin/v3/test_users.py b/tempest/api/identity/admin/v3/test_users.py
index e1d1543..7316c7f 100644
--- a/tempest/api/identity/admin/v3/test_users.py
+++ b/tempest/api/identity/admin/v3/test_users.py
@@ -15,13 +15,13 @@
from tempest.api.identity import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
class UsersV3TestJSON(base.BaseIdentityV3AdminTest):
_interface = 'json'
- @attr(type='gate')
+ @test.attr(type='gate')
def test_user_update(self):
# Test case to check if updating of user attributes is successful.
# Creating first user
@@ -66,7 +66,7 @@
self.assertEqual(u_email2, new_user_get['email'])
self.assertEqual('false', str(new_user_get['enabled']).lower())
- @attr(type='gate')
+ @test.attr(type='gate')
def test_list_user_projects(self):
# List the projects that a user has access to
assigned_project_ids = list()
@@ -120,7 +120,7 @@
', '.join(m_project for m_project
in missing_projects))
- @attr(type='gate')
+ @test.attr(type='gate')
def test_get_user(self):
# Get a user detail
self.data.setup_test_v3_user()
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index a5bf248..697057f 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -14,6 +14,7 @@
# under the License.
+from tempest import auth
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
@@ -95,6 +96,7 @@
cls.client = cls.os_adm.identity_v3_client
cls.token = cls.os_adm.token_v3_client
cls.endpoints_client = cls.os_adm.endpoints_client
+ cls.region_client = cls.os_adm.region_client
cls.data = DataGenerator(cls.client)
cls.non_admin_client = cls.os.identity_v3_client
cls.service_client = cls.os_adm.service_client
@@ -120,6 +122,14 @@
self.projects = []
self.v3_roles = []
+ @property
+ def test_credentials(self):
+ return auth.get_credentials(username=self.test_user,
+ user_id=self.user['id'],
+ password=self.test_password,
+ tenant_name=self.test_tenant,
+ tenant_id=self.tenant['id'])
+
def setup_test_user(self):
"""Set up a test user."""
self.setup_test_tenant()
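The new test_credentials property is meant to pair with the credentials-based clients.Manager constructor introduced elsewhere in this change; a hedged sketch of how a test might use it (surrounding names are illustrative):

    # after self.data.setup_test_user() has run
    creds = self.data.test_credentials
    user_os = clients.Manager(credentials=creds,
                              interface=self._interface)
    non_admin_identity_client = user_os.identity_client
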
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index e439238..9981292 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -42,11 +42,7 @@
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
- cls.os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
else:
cls.os = clients.Manager()
@@ -96,19 +92,12 @@
def setUpClass(cls):
super(BaseV1ImageMembersTest, cls).setUpClass()
if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_alt_creds()
- username, tenant_name, password = creds
- cls.os_alt = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
- cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
+ cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
else:
cls.os_alt = clients.AltManager()
- identity_client = cls._get_identity_admin_client()
- cls.alt_tenant_id = identity_client.get_tenant_by_name(
- cls.os_alt.credentials['tenant_name'])['id']
cls.alt_img_cli = cls.os_alt.image_client
+ cls.alt_tenant_id = cls.alt_img_cli.tenant_id
def _create_image(self):
image_file = StringIO.StringIO('*' * 1024)
@@ -139,20 +128,12 @@
super(BaseV2MemberImageTest, cls).setUpClass()
if CONF.compute.allow_tenant_isolation:
creds = cls.isolated_creds.get_alt_creds()
- username, tenant_name, password = creds
- cls.os_alt = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
- interface=cls._interface)
- cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
+ cls.os_alt = clients.Manager(creds)
else:
cls.os_alt = clients.AltManager()
- alt_tenant_name = cls.os_alt.credentials['tenant_name']
- identity_client = cls._get_identity_admin_client()
- cls.alt_tenant_id = identity_client.get_tenant_by_name(
- alt_tenant_name)['id']
cls.os_img_client = cls.os.image_client_v2
cls.alt_img_client = cls.os_alt.image_client_v2
+ cls.alt_tenant_id = cls.alt_img_client.tenant_id
def _list_image_ids_as_alt(self):
_, image_list = self.alt_img_client.image_list()
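The image base-class hunks above drop the tuple unpacking of isolated credentials and pass the credentials object straight to clients.Manager; a minimal sketch of the resulting pattern, under the same tenant-isolation assumption:

    if CONF.compute.allow_tenant_isolation:
        creds = cls.isolated_creds.get_alt_creds()
        cls.os_alt = clients.Manager(creds)
    else:
        cls.os_alt = clients.AltManager()
    # the alternate tenant id is now read from the client itself
    cls.alt_tenant_id = cls.os_alt.image_client.tenant_id
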
diff --git a/tempest/api/image/v1/test_images.py b/tempest/api/image/v1/test_images.py
index 8466c7b..2df3f7f 100644
--- a/tempest/api/image/v1/test_images.py
+++ b/tempest/api/image/v1/test_images.py
@@ -33,12 +33,12 @@
resp, body = self.create_image(name='New Name',
container_format='bare',
disk_format='raw',
- is_public=True,
+ is_public=False,
properties=properties)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Name', body.get('name'))
- self.assertTrue(body.get('is_public'))
+ self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
@@ -54,14 +54,14 @@
# Register a new remote image
resp, body = self.create_image(name='New Remote Image',
container_format='bare',
- disk_format='raw', is_public=True,
+ disk_format='raw', is_public=False,
location='http://example.com'
'/someimage.iso',
properties={'key1': 'value1',
'key2': 'value2'})
self.assertIn('id', body)
self.assertEqual('New Remote Image', body.get('name'))
- self.assertTrue(body.get('is_public'))
+ self.assertFalse(body.get('is_public'))
self.assertEqual('active', body.get('status'))
properties = body.get('properties')
self.assertEqual(properties['key1'], 'value1')
@@ -71,12 +71,12 @@
def test_register_http_image(self):
resp, body = self.create_image(name='New Http Image',
container_format='bare',
- disk_format='raw', is_public=True,
+ disk_format='raw', is_public=False,
copy_from=CONF.image.http_image)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Http Image', body.get('name'))
- self.assertTrue(body.get('is_public'))
+ self.assertFalse(body.get('is_public'))
self.client.wait_for_image_status(image_id, 'active')
resp, body = self.client.get_image(image_id)
self.assertEqual(resp['status'], '200')
@@ -88,12 +88,12 @@
resp, body = self.create_image(name='New_image_with_min_ram',
container_format='bare',
disk_format='raw',
- is_public=True,
+ is_public=False,
min_ram=40,
properties=properties)
self.assertIn('id', body)
self.assertEqual('New_image_with_min_ram', body.get('name'))
- self.assertTrue(body.get('is_public'))
+ self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
self.assertEqual(40, body.get('min_ram'))
for key, val in properties.items():
@@ -147,7 +147,7 @@
resp, image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
- is_public=True,
+ is_public=False,
location=location)
image_id = image['id']
return image_id
@@ -165,7 +165,7 @@
resp, image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
- is_public=True, data=image_file)
+ is_public=False, data=image_file)
image_id = image['id']
return image_id
@@ -247,11 +247,17 @@
@classmethod
@test.safe_setup
def setUpClass(cls):
- super(ListSnapshotImagesTest, cls).setUpClass()
+ # This test class uses only the nova v3 api to create a snapshot,
+ # as a similar test using the nova v2 api already exists in the
+ # nova v2 compute images api tests.
+ # Since nova v3 has no images api proxy, this test class was added
+ # to the image api tests.
if not CONF.compute_feature_enabled.api_v3:
- cls.servers_client = cls.os.servers_client
- else:
- cls.servers_client = cls.os.servers_v3_client
+ skip_msg = ("%s skipped as nova v3 api is not available" %
+ cls.__name__)
+ raise cls.skipException(skip_msg)
+ super(ListSnapshotImagesTest, cls).setUpClass()
+ cls.servers_client = cls.os.servers_v3_client
cls.servers = []
# We add a few images here to test the listing functionality of
# the images API
@@ -264,7 +270,7 @@
resp, image = cls.create_image(name="Standard Image",
container_format='ami',
disk_format='ami',
- is_public=True, data=image_file)
+ is_public=False, data=image_file)
cls.image_id = image['id']
cls.client.wait_for_image_status(image['id'], 'active')
@@ -281,8 +287,7 @@
cls.servers.append(server)
cls.servers_client.wait_for_server_status(
server['id'], 'ACTIVE')
- resp, image = cls.servers_client.create_image(
- server['id'], name)
+ resp, _ = cls.servers_client.create_image(server['id'], name)
image_id = data_utils.parse_image_id(resp['location'])
cls.created_images.append(image_id)
cls.client.wait_for_image_status(image_id,
@@ -290,6 +295,7 @@
return image_id
@test.attr(type='gate')
+ @test.services('compute')
def test_index_server_id(self):
# The images should contain images filtered by server id
resp, images = self.client.image_list_detail(
@@ -299,6 +305,7 @@
self.assertEqual(self.snapshot_set, result_set)
@test.attr(type='gate')
+ @test.services('compute')
def test_index_type(self):
# The list of servers should be filtered by image type
params = {'image_type': 'snapshot'}
@@ -309,6 +316,7 @@
self.assertIn(self.snapshot, result_set)
@test.attr(type='gate')
+ @test.services('compute')
def test_index_limit(self):
# Verify only the expected number of results are returned
resp, images = self.client.image_list_detail(limit=1)
@@ -317,6 +325,7 @@
self.assertEqual(1, len(images))
@test.attr(type='gate')
+ @test.services('compute')
def test_index_by_change_since(self):
# Verify an update image is returned
# Becoming ACTIVE will modify the updated time
@@ -352,7 +361,7 @@
resp, image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
- is_public=True, data=image_file,
+ is_public=False, data=image_file,
properties={'key1': 'value1'})
image_id = image['id']
return image_id
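A note on the v1 changes above: the tests now register images with is_public=False because publicizing an image is typically restricted to admin by the default "publicize_image" policy (an assumption about the gate's Glance configuration), so a non-admin tenant requesting is_public=True would be rejected. A minimal sketch of the registration call these tests rely on, reusing the helpers shown above; the image name is illustrative:

# Minimal sketch reusing the test helpers shown above; the image name is
# illustrative and the policy rationale is an assumption about the default
# "publicize_image" rule.
import StringIO

image_file = StringIO.StringIO('*' * 1024)
resp, image = self.create_image(name='private-image',
                                container_format='bare',
                                disk_format='raw',
                                is_public=False,
                                data=image_file)
self.client.wait_for_image_status(image['id'], 'active')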
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 2592409..37dc163 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -35,17 +35,19 @@
upload the image file, get image and get image file api's
"""
+ uuid = '00000000-1111-2222-3333-444455556666'
image_name = data_utils.rand_name('image')
resp, body = self.create_image(name=image_name,
container_format='bare',
disk_format='raw',
- visibility='public')
+ visibility='private',
+ ramdisk_id=uuid)
self.assertIn('id', body)
image_id = body.get('id')
self.assertIn('name', body)
self.assertEqual(image_name, body['name'])
self.assertIn('visibility', body)
- self.assertEqual('public', body['visibility'])
+ self.assertEqual('private', body['visibility'])
self.assertIn('status', body)
self.assertEqual('queued', body['status'])
@@ -60,6 +62,7 @@
self.assertEqual(200, resp.status)
self.assertEqual(image_id, body['id'])
self.assertEqual(image_name, body['name'])
+ self.assertEqual(uuid, body['ramdisk_id'])
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@@ -77,7 +80,7 @@
resp, body = self.client.create_image(name=image_name,
container_format='bare',
disk_format='raw',
- visibility='public')
+ visibility='private')
self.assertEqual(201, resp.status)
image_id = body['id']
@@ -99,7 +102,7 @@
resp, body = self.client.create_image(name=image_name,
container_format='bare',
disk_format='iso',
- visibility='public')
+ visibility='private')
self.assertEqual(201, resp.status)
self.addCleanup(self.client.delete_image, body['id'])
self.assertEqual('queued', body['status'])
@@ -113,10 +116,8 @@
# Update Image
new_image_name = data_utils.rand_name('new-image')
- new_visibility = 'private'
resp, body = self.client.update_image(image_id, [
- dict(replace='/name', value=new_image_name),
- dict(replace='/visibility', value=new_visibility)])
+ dict(replace='/name', value=new_image_name)])
self.assertEqual(200, resp.status)
@@ -126,7 +127,6 @@
self.assertEqual(200, resp.status)
self.assertEqual(image_id, body['id'])
self.assertEqual(new_image_name, body['name'])
- self.assertEqual(new_visibility, body['visibility'])
class ListImagesTest(base.BaseV2ImageTest):
@@ -160,7 +160,7 @@
resp, body = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
- visibility='public')
+ visibility='private')
image_id = body['id']
resp, body = cls.client.store_image(image_id, data=image_file)
@@ -202,8 +202,8 @@
@test.attr(type='gate')
def test_list_images_param_visibility(self):
- # Test to get all images with visibility = public
- params = {"visibility": "public"}
+ # Test to get all images with visibility = private
+ params = {"visibility": "private"}
self._list_by_param_value_and_assert(params)
@test.attr(type='gate')
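For context on the update_image() call above: the Glance v2 API updates images with a JSON-patch style list of operations, where each entry names an operation ('replace' here), a path and a value. A sketch of the patch document equivalent to the two operations that were previously sent; the image id is illustrative:

# Sketch of the JSON-patch style argument accepted by update_image(); the
# image id is illustrative.
patch = [
    {'replace': '/name', 'value': 'renamed-image'},
    {'replace': '/visibility', 'value': 'private'},
]
resp, body = self.client.update_image('11111111-2222-3333-4444-555566667777',
                                      patch)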
diff --git a/tempest/api/image/v2/test_images_tags.py b/tempest/api/image/v2/test_images_tags.py
index 504c0e8..dec3353 100644
--- a/tempest/api/image/v2/test_images_tags.py
+++ b/tempest/api/image/v2/test_images_tags.py
@@ -23,7 +23,7 @@
def test_update_delete_tags_for_image(self):
resp, body = self.create_image(container_format='bare',
disk_format='raw',
- visibility='public')
+ visibility='private')
image_id = body['id']
tag = data_utils.rand_name('tag-')
self.addCleanup(self.client.delete_image, image_id)
diff --git a/tempest/api/image/v2/test_images_tags_negative.py b/tempest/api/image/v2/test_images_tags_negative.py
index 3233db7..13cfa0a 100644
--- a/tempest/api/image/v2/test_images_tags_negative.py
+++ b/tempest/api/image/v2/test_images_tags_negative.py
@@ -35,7 +35,7 @@
# Delete non existing tag.
resp, body = self.create_image(container_format='bare',
disk_format='raw',
- is_public=True,
+ visibility='private'
)
image_id = body['id']
tag = data_utils.rand_name('non-exist-tag-')
diff --git a/tempest/api/network/admin/test_floating_ips_admin_actions.py b/tempest/api/network/admin/test_floating_ips_admin_actions.py
new file mode 100644
index 0000000..5728432
--- /dev/null
+++ b/tempest/api/network/admin/test_floating_ips_admin_actions.py
@@ -0,0 +1,72 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest import clients
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class FloatingIPAdminTestJSON(base.BaseAdminNetworkTest):
+ _interface = 'json'
+ force_tenant_isolation = True
+
+ @classmethod
+ def setUpClass(cls):
+ super(FloatingIPAdminTestJSON, cls).setUpClass()
+ cls.ext_net_id = CONF.network.public_network_id
+ cls.floating_ip = cls.create_floatingip(cls.ext_net_id)
+ cls.alt_manager = clients.Manager(cls.isolated_creds.get_alt_creds())
+ cls.alt_client = cls.alt_manager.network_client
+
+ @test.attr(type='smoke')
+ def test_list_floating_ips_from_admin_and_nonadmin(self):
+ # Create floating ip from admin user
+ resp, floating_ip_admin = self.admin_client.create_floatingip(
+ floating_network_id=self.ext_net_id)
+ self.assertEqual('201', resp['status'])
+ self.addCleanup(self.admin_client.delete_floatingip,
+ floating_ip_admin['floatingip']['id'])
+ # Create floating ip from alt user
+ resp, body = self.alt_client.create_floatingip(
+ floating_network_id=self.ext_net_id)
+ self.assertEqual('201', resp['status'])
+ floating_ip_alt = body['floatingip']
+ self.addCleanup(self.alt_client.delete_floatingip,
+ floating_ip_alt['id'])
+ # List floating ips from admin
+ resp, body = self.admin_client.list_floatingips()
+ self.assertEqual('200', resp['status'])
+ floating_ip_ids_admin = [f['id'] for f in body['floatingips']]
+ # Check that admin sees all floating ips
+ self.assertIn(self.floating_ip['id'], floating_ip_ids_admin)
+ self.assertIn(floating_ip_admin['floatingip']['id'],
+ floating_ip_ids_admin)
+ self.assertIn(floating_ip_alt['id'], floating_ip_ids_admin)
+ # List floating ips from nonadmin
+ resp, body = self.client.list_floatingips()
+ floating_ip_ids = [f['id'] for f in body['floatingips']]
+ # Check that nonadmin user doesn't see floating ip created from admin
+ # and floating ip that is created in another tenant (alt user)
+ self.assertIn(self.floating_ip['id'], floating_ip_ids)
+ self.assertNotIn(floating_ip_admin['floatingip']['id'],
+ floating_ip_ids)
+ self.assertNotIn(floating_ip_alt['id'], floating_ip_ids)
+
+
+class FloatingIPAdminTestXML(FloatingIPAdminTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/network/admin/test_l3_agent_scheduler.py b/tempest/api/network/admin/test_l3_agent_scheduler.py
index f4050c5..3b05f42 100644
--- a/tempest/api/network/admin/test_l3_agent_scheduler.py
+++ b/tempest/api/network/admin/test_l3_agent_scheduler.py
@@ -28,8 +28,9 @@
List L3 agents hosting the given router.
Add and Remove Router to L3 agent
- v2.0 of the Neutron API is assumed. It is also assumed that the following
- options are defined in the [network] section of etc/tempest.conf:
+ v2.0 of the Neutron API is assumed.
+
+ The l3_agent_scheduler extension is required for these tests.
"""
@classmethod
diff --git a/tempest/api/network/admin/test_load_balancer_admin_actions.py b/tempest/api/network/admin/test_load_balancer_admin_actions.py
index bc7f1d6..fe4fc60 100644
--- a/tempest/api/network/admin/test_load_balancer_admin_actions.py
+++ b/tempest/api/network/admin/test_load_balancer_admin_actions.py
@@ -38,9 +38,7 @@
cls.force_tenant_isolation = True
manager = cls.get_client_manager()
cls.client = manager.network_client
- username, tenant_name, passwd = cls.isolated_creds.get_primary_creds()
- cls.tenant_id = cls.os_adm.identity_client.get_tenant_by_name(
- tenant_name)['id']
+ cls.tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.pool = cls.create_pool(data_utils.rand_name('pool-'),
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 696a1c3..dcd9bff 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -145,7 +145,7 @@
return network
@classmethod
- def create_subnet(cls, network):
+ def create_subnet(cls, network, gateway=None):
"""Wrapper utility that returns a test subnet."""
# The cidr and mask_bits depend on the ip version.
if cls._ip_version == 4:
@@ -156,14 +156,19 @@
mask_bits = CONF.network.tenant_network_v6_mask_bits
# Find a cidr that is not in use yet and create a subnet with it
for subnet_cidr in cidr.subnet(mask_bits):
+ if not gateway:
+ gateway = str(netaddr.IPAddress(subnet_cidr) + 1)
try:
resp, body = cls.client.create_subnet(
network_id=network['id'],
cidr=str(subnet_cidr),
- ip_version=cls._ip_version)
+ ip_version=cls._ip_version,
+ gateway_ip=gateway)
break
except exceptions.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+ # Unset gateway value if there is an overlapping subnet
+ gateway = None
if not is_overlapping_cidr:
raise
else:
@@ -174,14 +179,22 @@
return subnet
@classmethod
- def create_port(cls, network):
+ def create_port(cls, network, **kwargs):
"""Wrapper utility that returns a test port."""
- resp, body = cls.client.create_port(network_id=network['id'])
+ resp, body = cls.client.create_port(network_id=network['id'],
+ **kwargs)
port = body['port']
cls.ports.append(port)
return port
@classmethod
+ def update_port(cls, port, **kwargs):
+ """Wrapper utility that updates a test port."""
+ resp, body = cls.client.update_port(port['id'],
+ **kwargs)
+ return body['port']
+
+ @classmethod
def create_router(cls, router_name=None, admin_state_up=False,
external_network_id=None, enable_snat=None):
ext_gw_info = {}
@@ -286,7 +299,7 @@
def create_vpnservice(cls, subnet_id, router_id):
"""Wrapper utility that returns a test vpn service."""
resp, body = cls.client.create_vpnservice(
- subnet_id, router_id, admin_state_up=True,
+ subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
name=data_utils.rand_name("vpnservice-"))
vpnservice = body['vpnservice']
cls.vpnservices.append(vpnservice)
@@ -295,7 +308,7 @@
@classmethod
def create_ikepolicy(cls, name):
"""Wrapper utility that returns a test ike policy."""
- resp, body = cls.client.create_ikepolicy(name)
+ resp, body = cls.client.create_ikepolicy(name=name)
ikepolicy = body['ikepolicy']
cls.ikepolicies.append(ikepolicy)
return ikepolicy
@@ -344,11 +357,7 @@
raise cls.skipException(msg)
if (CONF.compute.allow_tenant_isolation or
cls.force_tenant_isolation is True):
- creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = creds
- cls.os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
+ cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
interface=cls._interface)
else:
cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
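The create_subnet() change above defaults the gateway to the first usable address of whichever CIDR is selected, and resets it when an overlapping CIDR forces a retry with the next candidate. A standalone illustration of that computation; the CIDR is illustrative:

# Standalone illustration of the default-gateway computation above; the CIDR
# is illustrative.
import netaddr

subnet_cidr = netaddr.IPNetwork('10.100.0.0/28')
gateway = str(netaddr.IPAddress(subnet_cidr.first + 1))
print(gateway)  # 10.100.0.1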
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
new file mode 100644
index 0000000..e0e26da
--- /dev/null
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -0,0 +1,83 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.network import base
+from tempest import test
+
+
+class AllowedAddressPairTestJSON(base.BaseNetworkTest):
+ _interface = 'json'
+
+ """
+ Tests the Neutron Allowed Address Pair API extension using the Tempest
+ ReST client. The following API operations are tested with this extension:
+
+ create port
+ list ports
+ update port
+ show port
+
+ v2.0 of the Neutron API is assumed. It is also assumed that the following
+ options are defined in the [network-feature-enabled] section of
+ etc/tempest.conf
+
+ api_extensions
+ """
+
+ @classmethod
+ @test.safe_setup
+ def setUpClass(cls):
+ super(AllowedAddressPairTestJSON, cls).setUpClass()
+ if not test.is_extension_enabled('allowed-address-pairs', 'network'):
+ msg = "Allowed Address Pairs extension not enabled."
+ raise cls.skipException(msg)
+ cls.network = cls.create_network()
+ cls.create_subnet(cls.network)
+ port = cls.create_port(cls.network)
+ cls.ip_address = port['fixed_ips'][0]['ip_address']
+ cls.mac_address = port['mac_address']
+
+ @test.attr(type='smoke')
+ def test_create_list_port_with_address_pair(self):
+ # Create port with allowed address pair attribute
+ allowed_address_pairs = [{'ip_address': self.ip_address,
+ 'mac_address': self.mac_address}]
+ resp, body = self.client.create_port(
+ network_id=self.network['id'],
+ allowed_address_pairs=allowed_address_pairs)
+ self.assertEqual('201', resp['status'])
+ port_id = body['port']['id']
+ self.addCleanup(self.client.delete_port, port_id)
+
+ # Confirm port was created with allowed address pair attribute
+ resp, body = self.client.list_ports()
+ self.assertEqual('200', resp['status'])
+ ports = body['ports']
+ port = [p for p in ports if p['id'] == port_id]
+ msg = 'Created port not found in list of ports returned by Neutron'
+ self.assertTrue(port, msg)
+ self._confirm_allowed_address_pair(port[0], self.ip_address)
+
+ def _confirm_allowed_address_pair(self, port, ip):
+ msg = 'Port allowed address pairs should not be empty'
+ self.assertTrue(port['allowed_address_pairs'], msg)
+ ip_address = port['allowed_address_pairs'][0]['ip_address']
+ mac_address = port['allowed_address_pairs'][0]['mac_address']
+ self.assertEqual(ip_address, ip)
+ self.assertEqual(mac_address, self.mac_address)
+
+
+class AllowedAddressPairTestXML(AllowedAddressPairTestJSON):
+ _interface = 'xml'
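The new test class above covers creating and listing a port with allowed_address_pairs; updating the attribute on an existing port goes through the same network client, as sketched below. This is not part of the change above, and the port id and addresses are illustrative:

# Hedged sketch of updating allowed_address_pairs on an existing port with
# the same Tempest network client; port id and addresses are illustrative.
allowed_address_pairs = [{'ip_address': '10.100.0.5',
                          'mac_address': 'fa:16:3e:00:00:01'}]
resp, body = self.client.update_port(port_id,
                                     allowed_address_pairs=allowed_address_pairs)
self.assertEqual('200', resp['status'])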
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index 7191940..2463654 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import netaddr
+
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import config
@@ -78,6 +80,8 @@
self.assertEqual(created_floating_ip['port_id'], self.ports[0]['id'])
self.assertEqual(created_floating_ip['floating_network_id'],
self.ext_net_id)
+ self.assertIn(created_floating_ip['fixed_ip_address'],
+ [ip['ip_address'] for ip in self.ports[0]['fixed_ips']])
# Verifies the details of a floating_ip
resp, floating_ip = self.client.show_floatingip(
created_floating_ip['id'])
@@ -172,6 +176,54 @@
port_other_router['id'])
self.assertIsNotNone(updated_floating_ip['fixed_ip_address'])
+ @test.attr(type='smoke')
+ def test_create_floating_ip_specifying_a_fixed_ip_address(self):
+ resp, body = self.client.create_floatingip(
+ floating_network_id=self.ext_net_id,
+ port_id=self.ports[1]['id'],
+ fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
+ self.assertEqual('201', resp['status'])
+ created_floating_ip = body['floatingip']
+ self.addCleanup(self.client.delete_floatingip,
+ created_floating_ip['id'])
+ self.assertIsNotNone(created_floating_ip['id'])
+ self.assertEqual(created_floating_ip['fixed_ip_address'],
+ self.ports[1]['fixed_ips'][0]['ip_address'])
+ resp, floating_ip = self.client.update_floatingip(
+ created_floating_ip['id'], port_id=None)
+ self.assertEqual('200', resp['status'])
+ self.assertIsNone(floating_ip['floatingip']['port_id'])
+
+ @test.attr(type='smoke')
+ def test_create_update_floatingip_with_port_multiple_ip_address(self):
+ # Find ips in the subnet that can be used for this test
+ ips = list(netaddr.IPNetwork(self.subnet['cidr']))
+ list_ips = [str(ip) for ip in ips[-3:-1]]
+ fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
+ # Create port
+ resp, body = self.client.create_port(network_id=self.network['id'],
+ fixed_ips=fixed_ips)
+ self.assertEqual('201', resp['status'])
+ port = body['port']
+ self.addCleanup(self.client.delete_port, port['id'])
+ # Create floating ip
+ resp, body = self.client.create_floatingip(
+ floating_network_id=self.ext_net_id, port_id=port['id'],
+ fixed_ip_address=list_ips[0])
+ self.assertEqual('201', resp['status'])
+ floating_ip = body['floatingip']
+ self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
+ self.assertIsNotNone(floating_ip['id'])
+ self.assertEqual(floating_ip['fixed_ip_address'], list_ips[0])
+ # Update floating ip
+ resp, body = self.client.update_floatingip(
+ floating_ip['id'], port_id=port['id'],
+ fixed_ip_address=list_ips[1])
+ self.assertEqual('200', resp['status'])
+ update_floating_ip = body['floatingip']
+ self.assertEqual(update_floating_ip['fixed_ip_address'],
+ list_ips[1])
+
class FloatingIPTestXML(FloatingIPTestJSON):
_interface = 'xml'
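The multiple-address test above takes its fixed IPs from the top of the subnet range, likely to stay clear of addresses already allocated near the bottom of the range; the slice stops before the final (broadcast) address. A standalone illustration of the selection; the CIDR is illustrative:

# Standalone illustration of the ips[-3:-1] selection used above; the CIDR is
# illustrative.
import netaddr

ips = list(netaddr.IPNetwork('10.100.0.0/28'))
list_ips = [str(ip) for ip in ips[-3:-1]]
print(list_ips)  # ['10.100.0.13', '10.100.0.14']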
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 0647069..555cbda 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -14,9 +14,12 @@
from tempest.api.network import base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
+
class FWaaSExtensionTestJSON(base.BaseNetworkTest):
_interface = 'json'
@@ -67,6 +70,20 @@
except exceptions.NotFound:
pass
+ self.client.wait_for_resource_deletion('firewall', fw_id)
+
+ def _wait_for_active(self, fw_id):
+ def _wait():
+ resp, firewall = self.client.show_firewall(fw_id)
+ self.assertEqual('200', resp['status'])
+ firewall = firewall['firewall']
+ return firewall['status'] == 'ACTIVE'
+
+ if not test.call_until_true(_wait, CONF.network.build_timeout,
+ CONF.network.build_interval):
+ m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
+ raise exceptions.TimeoutException(m)
+
@test.attr(type='smoke')
def test_list_firewall_rules(self):
# List firewall rules
@@ -168,6 +185,15 @@
@test.attr(type='smoke')
def test_create_show_delete_firewall(self):
+ # Create tenant network resources required for an ACTIVE firewall
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ router = self.create_router(
+ data_utils.rand_name('router-'),
+ admin_state_up=True)
+ self.client.add_router_interface_with_subnet_id(
+ router['id'], subnet['id'])
+
# Create firewall
resp, body = self.client.create_firewall(
name=data_utils.rand_name("firewall"),
@@ -177,11 +203,16 @@
firewall_id = created_firewall['id']
self.addCleanup(self._try_delete_firewall, firewall_id)
+ self._wait_for_active(firewall_id)
+
# show a created firewall
resp, firewall = self.client.show_firewall(firewall_id)
self.assertEqual('200', resp['status'])
firewall = firewall['firewall']
+
for key, value in firewall.iteritems():
+ if key == 'status':
+ continue
self.assertEqual(created_firewall[key], value)
# list firewall
@@ -198,9 +229,6 @@
# Delete firewall
resp, _ = self.client.delete_firewall(firewall_id)
self.assertEqual('204', resp['status'])
- # Confirm deletion
- # TODO(raies): Confirm deletion can be done only when,
- # deleted firewall status is not "PENDING_DELETE".
class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
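The _wait_for_active() helper added above follows the usual Tempest polling idiom: call_until_true() repeatedly invokes a boolean check until it returns True or the timeout expires. A generic sketch of the same idiom; the helper name and the resource description are hypothetical:

# Generic sketch of the polling idiom used by _wait_for_active() above; the
# helper name and resource description are hypothetical.
from tempest import config
from tempest import exceptions
from tempest import test

CONF = config.CONF


def wait_for_resource(check, resource_id, description):
    # check() must return True once the resource reaches the desired state
    if not test.call_until_true(check,
                                CONF.network.build_timeout,
                                CONF.network.build_interval):
        m = 'Timed out waiting for %s %s.' % (description, resource_id)
        raise exceptions.TimeoutException(m)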
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index de44f4d..ac0fd11 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -216,6 +216,39 @@
# it from the list.
self.subnets.pop()
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_with_gw(self):
+ gateway = '10.100.0.13'
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
+ self.assertEqual('201', resp['status'])
+ network = body['network']
+ net_id = network['id']
+ subnet = self.create_subnet(network, gateway)
+ # Verifies Subnet GW in IPv4
+ self.assertEqual(subnet['gateway_ip'], gateway)
+ # Delete network and subnet
+ resp, body = self.client.delete_network(net_id)
+ self.assertEqual('204', resp['status'])
+ self.subnets.pop()
+
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_without_gw(self):
+ net = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+ gateway_ip = str(netaddr.IPAddress(net.first + 1))
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
+ self.assertEqual('201', resp['status'])
+ network = body['network']
+ net_id = network['id']
+ subnet = self.create_subnet(network)
+ # Verifies Subnet GW in IPv4
+ self.assertEqual(subnet['gateway_ip'], gateway_ip)
+ # Delete network and subnet
+ resp, body = self.client.delete_network(net_id)
+ self.assertEqual('204', resp['status'])
+ self.subnets.pop()
+
class NetworksTestXML(NetworksTestJSON):
_interface = 'xml'
@@ -364,11 +397,41 @@
@classmethod
def setUpClass(cls):
- super(NetworksIpV6TestJSON, cls).setUpClass()
if not CONF.network_feature_enabled.ipv6:
- cls.tearDownClass()
skip_msg = "IPv6 Tests are disabled."
raise cls.skipException(skip_msg)
+ super(NetworksIpV6TestJSON, cls).setUpClass()
+
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_with_gw(self):
+ gateway = '2003::2'
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
+ self.assertEqual('201', resp['status'])
+ network = body['network']
+ net_id = network['id']
+ subnet = self.create_subnet(network, gateway)
+ # Verifies Subnet GW in IPv6
+ self.assertEqual(subnet['gateway_ip'], gateway)
+ # Delete network and subnet
+ resp, body = self.client.delete_network(net_id)
+ self.assertEqual('204', resp['status'])
+ self.subnets.pop()
+
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_without_gw(self):
+ name = data_utils.rand_name('network-')
+ resp, body = self.client.create_network(name=name)
+ self.assertEqual('201', resp['status'])
+ network = body['network']
+ net_id = network['id']
+ subnet = self.create_subnet(network)
+ # Verifies Subnet GW in IPv6
+ self.assertEqual(subnet['gateway_ip'], '2003::1')
+ # Delete network and subnet
+ resp, body = self.client.delete_network(net_id)
+ self.assertEqual('204', resp['status'])
+ self.subnets.pop()
class NetworksIpV6TestXML(NetworksIpV6TestJSON):
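The expected value '2003::1' in the IPv6 variant above follows from the configuration defaults: assuming tenant_network_v6_cidr is left at 2003::/48 and the mask bits at 64, the first candidate subnet is 2003::/64 and its first usable address is 2003::1. A standalone check of that arithmetic:

# Standalone check of where '2003::1' comes from; the /48 prefix is the
# assumed default for tenant_network_v6_cidr.
import netaddr

cidr = netaddr.IPNetwork('2003::/48')
first_subnet = next(cidr.subnet(64))
print(str(first_subnet[1]))  # 2003::1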
diff --git a/tempest/api/network/test_networks_negative.py b/tempest/api/network/test_networks_negative.py
index 89c8a9f..53dfc52 100644
--- a/tempest/api/network/test_networks_negative.py
+++ b/tempest/api/network/test_networks_negative.py
@@ -17,37 +17,37 @@
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class NetworksNegativeTestJSON(base.BaseNetworkTest):
_interface = 'json'
- @attr(type=['negative', 'smoke'])
+ @test.attr(type=['negative', 'smoke'])
def test_show_non_existent_network(self):
non_exist_id = data_utils.rand_name('network')
self.assertRaises(exceptions.NotFound, self.client.show_network,
non_exist_id)
- @attr(type=['negative', 'smoke'])
+ @test.attr(type=['negative', 'smoke'])
def test_show_non_existent_subnet(self):
non_exist_id = data_utils.rand_name('subnet')
self.assertRaises(exceptions.NotFound, self.client.show_subnet,
non_exist_id)
- @attr(type=['negative', 'smoke'])
+ @test.attr(type=['negative', 'smoke'])
def test_show_non_existent_port(self):
non_exist_id = data_utils.rand_name('port')
self.assertRaises(exceptions.NotFound, self.client.show_port,
non_exist_id)
- @attr(type=['negative', 'smoke'])
+ @test.attr(type=['negative', 'smoke'])
def test_update_non_existent_network(self):
non_exist_id = data_utils.rand_name('network')
self.assertRaises(exceptions.NotFound, self.client.update_network,
non_exist_id, name="new_name")
- @attr(type=['negative', 'smoke'])
+ @test.attr(type=['negative', 'smoke'])
def test_delete_non_existent_network(self):
non_exist_id = data_utils.rand_name('network')
self.assertRaises(exceptions.NotFound, self.client.delete_network,
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 68f617b..e6e6ea1 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -57,6 +57,8 @@
resp, body = self.client.create_port(network_id=self.network['id'])
self.assertEqual('201', resp['status'])
port = body['port']
+ # Schedule port deletion with verification upon test completion
+ self.addCleanup(self._delete_port, port['id'])
self.assertTrue(port['admin_state_up'])
# Verify port update
new_name = "New_Port"
@@ -68,9 +70,6 @@
updated_port = body['port']
self.assertEqual(updated_port['name'], new_name)
self.assertFalse(updated_port['admin_state_up'])
- # Verify port deletion
- resp, body = self.client.delete_port(port['id'])
- self.assertEqual('204', resp['status'])
@test.attr(type='smoke')
def test_show_port(self):
@@ -146,6 +145,29 @@
for port in ports:
self.assertEqual(sorted(fields), sorted(port.keys()))
+ @test.attr(type='smoke')
+ def test_update_port_with_second_ip(self):
+ # Create a network with two subnets
+ network = self.create_network()
+ subnet_1 = self.create_subnet(network)
+ subnet_2 = self.create_subnet(network)
+ fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
+ fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
+
+ # Create a port with a single IP address from first subnet
+ port = self.create_port(network,
+ fixed_ips=fixed_ip_1)
+ self.assertEqual(1, len(port['fixed_ips']))
+
+ # Update the port with a second IP address from second subnet
+ fixed_ips = fixed_ip_1 + fixed_ip_2
+ port = self.update_port(port, fixed_ips=fixed_ips)
+ self.assertEqual(2, len(port['fixed_ips']))
+
+ # Update the port to return to a single IP address
+ port = self.update_port(port, fixed_ips=fixed_ip_1)
+ self.assertEqual(1, len(port['fixed_ips']))
+
class PortsTestXML(PortsTestJSON):
_interface = 'xml'
@@ -266,11 +288,10 @@
@classmethod
def setUpClass(cls):
- super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
if not CONF.network_feature_enabled.ipv6:
- cls.tearDownClass()
skip_msg = "IPv6 Tests are disabled."
raise cls.skipException(skip_msg)
+ super(PortsAdminExtendedAttrsIpV6TestJSON, cls).setUpClass()
class PortsAdminExtendedAttrsIpV6TestXML(
diff --git a/tempest/api/network/test_routers.py b/tempest/api/network/test_routers.py
index 4cc0338..7605b8a 100644
--- a/tempest/api/network/test_routers.py
+++ b/tempest/api/network/test_routers.py
@@ -262,10 +262,25 @@
self.addCleanup(
self._delete_extra_routes,
self.router['id'])
- # Update router extra route
+ # Update the router's extra route; the second ip of the range is
+ # used as the next hop
cidr = netaddr.IPNetwork(self.subnet['cidr'])
+ next_hop = str(cidr[2])
+ destination = str(self.subnet['cidr'])
resp, extra_route = self.client.update_extra_routes(
- self.router['id'], str(cidr[0]), str(self.subnet['cidr']))
+ self.router['id'], next_hop, destination)
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(1, len(extra_route['router']['routes']))
+ self.assertEqual(destination,
+ extra_route['router']['routes'][0]['destination'])
+ self.assertEqual(next_hop,
+ extra_route['router']['routes'][0]['nexthop'])
+ resp, show_body = self.client.show_router(self.router['id'])
+ self.assertEqual('200', resp['status'])
+ self.assertEqual(destination,
+ show_body['router']['routes'][0]['destination'])
+ self.assertEqual(next_hop,
+ show_body['router']['routes'][0]['nexthop'])
def _delete_extra_routes(self, router_id):
resp, _ = self.client.delete_extra_routes(router_id)
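On the next-hop choice above: indexing an IPNetwork returns the n-th address of the range, so cidr[2] is two above the network address at cidr[0], i.e. the second host address. A standalone illustration; the CIDR is illustrative:

# Standalone illustration of the cidr[2] next-hop selection; the CIDR is
# illustrative.
import netaddr

cidr = netaddr.IPNetwork('10.100.0.0/28')
print(str(cidr[2]))  # 10.100.0.2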
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 3e26f46..b98cea1 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import six
+
from tempest.api.network import base_security_groups as base
from tempest.common.utils import data_utils
from tempest import test
@@ -84,22 +86,25 @@
direction='ingress'
)
self.assertEqual('201', resp['status'])
- self.addCleanup(self._delete_security_group_rule,
- rule_create_body['security_group_rule']['id']
- )
- # Show details of the created security rule
- resp, show_rule_body = self.client.show_security_group_rule(
- rule_create_body['security_group_rule']['id']
- )
- self.assertEqual('200', resp['status'])
+ # Show details of the created security rule
+ resp, show_rule_body = self.client.show_security_group_rule(
+ rule_create_body['security_group_rule']['id']
+ )
+ self.assertEqual('200', resp['status'])
+ create_dict = rule_create_body['security_group_rule']
+ for key, value in six.iteritems(create_dict):
+ self.assertEqual(value,
+ show_rule_body['security_group_rule'][key],
+ "%s does not match." % key)
- # List rules and verify created rule is in response
- resp, rule_list_body = self.client.list_security_group_rules()
- self.assertEqual('200', resp['status'])
- rule_list = [rule['id']
- for rule in rule_list_body['security_group_rules']]
- self.assertIn(rule_create_body['security_group_rule']['id'], rule_list)
+ # List rules and verify created rule is in response
+ resp, rule_list_body = self.client.list_security_group_rules()
+ self.assertEqual('200', resp['status'])
+ rule_list = [rule['id']
+ for rule in rule_list_body['security_group_rules']]
+ self.assertIn(rule_create_body['security_group_rule']['id'],
+ rule_list)
@test.attr(type='smoke')
def test_create_security_group_rule_with_additional_args(self):
@@ -122,9 +127,6 @@
self.assertEqual('201', resp['status'])
sec_group_rule = rule_create_body['security_group_rule']
- self.addCleanup(self._delete_security_group_rule,
- sec_group_rule['id']
- )
self.assertEqual(sec_group_rule['direction'], direction)
self.assertEqual(sec_group_rule['protocol'], protocol)
diff --git a/tempest/api/network/test_vpnaas_extensions.py b/tempest/api/network/test_vpnaas_extensions.py
index 7edaaf8..a49e944 100644
--- a/tempest/api/network/test_vpnaas_extensions.py
+++ b/tempest/api/network/test_vpnaas_extensions.py
@@ -82,8 +82,8 @@
def test_create_update_delete_vpn_service(self):
# Creates a VPN service
name = data_utils.rand_name('vpn-service-')
- resp, body = self.client.create_vpnservice(self.subnet['id'],
- self.router['id'],
+ resp, body = self.client.create_vpnservice(subnet_id=self.subnet['id'],
+ router_id=self.router['id'],
name=name,
admin_state_up=True)
self.assertEqual('201', resp['status'])
@@ -134,7 +134,7 @@
# Creates a IKE policy
name = data_utils.rand_name('ike-policy-')
resp, body = (self.client.create_ikepolicy(
- name,
+ name=name,
ike_version="v1",
encryption_algorithm="aes-128",
auth_algorithm="sha1"))
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 45c895b..6b18182 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -38,23 +38,12 @@
cls.__name__, network_resources=cls.network_resources)
if CONF.compute.allow_tenant_isolation:
# Get isolated creds for normal user
- creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
- cls.os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name)
+ cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
# Get isolated creds for admin user
- admin_creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = admin_creds
- cls.os_admin = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name)
+ cls.os_admin = clients.Manager(
+ cls.isolated_creds.get_admin_creds())
# Get isolated creds for alt user
- alt_creds = cls.isolated_creds.get_alt_creds()
- alt_username, alt_tenant, alt_password = alt_creds
- cls.os_alt = clients.Manager(username=alt_username,
- password=alt_password,
- tenant_name=alt_tenant)
+ cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
# Add isolated users to operator role so that they can create a
# container in swift.
cls._assign_member_role()
@@ -92,8 +81,8 @@
@classmethod
def _assign_member_role(cls):
- primary_user = cls.isolated_creds.get_primary_user()
- alt_user = cls.isolated_creds.get_alt_user()
+ primary_creds = cls.isolated_creds.get_primary_creds()
+ alt_creds = cls.isolated_creds.get_alt_creds()
swift_role = CONF.object_storage.operator_role
try:
resp, roles = cls.os_admin.identity_client.list_roles()
@@ -101,9 +90,9 @@
except StopIteration:
msg = "No role named %s found" % swift_role
raise exceptions.NotFound(msg)
- for user in [primary_user, alt_user]:
- cls.os_admin.identity_client.assign_user_role(user['tenantId'],
- user['id'],
+ for creds in [primary_creds, alt_creds]:
+ cls.os_admin.identity_client.assign_user_role(creds.tenant_id,
+ creds.user_id,
role['id'])
@classmethod
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index d919245..19e3068 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -27,6 +27,7 @@
class AccountQuotasTest(base.BaseObjectTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(AccountQuotasTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -34,10 +35,7 @@
cls.data.setup_test_user()
- cls.os_reselleradmin = clients.Manager(
- cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
+ cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
# Retrieve the ResellerAdmin role id
reseller_role_id = None
@@ -49,15 +47,11 @@
msg = "No ResellerAdmin role found"
raise exceptions.NotFound(msg)
- # Retrieve the ResellerAdmin tenant id
- _, users = cls.os_admin.identity_client.get_users()
- reseller_user_id = next(usr['id'] for usr in users if usr['name']
- == cls.data.test_user)
+ # Retrieve the ResellerAdmin user id
+ reseller_user_id = cls.data.test_credentials.user_id
# Retrieve the ResellerAdmin tenant id
- _, tenants = cls.os_admin.identity_client.list_tenants()
- reseller_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
- == cls.data.test_tenant)
+ reseller_tenant_id = cls.data.test_credentials.tenant_id
# Assign the newly created user the appropriate ResellerAdmin role
cls.os_admin.identity_client.assign_user_role(
@@ -101,7 +95,8 @@
@classmethod
def tearDownClass(cls):
- cls.delete_containers([cls.container_name])
+ if hasattr(cls, "container_name"):
+ cls.delete_containers([cls.container_name])
cls.data.teardown_all()
super(AccountQuotasTest, cls).tearDownClass()
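The two changes above, the @test.safe_setup decorator and the hasattr() guard, form a pattern repeated in the object storage tests that follow: safe_setup ensures tearDownClass still runs when setUpClass fails partway, and the guard keeps that teardown from raising AttributeError when the failure happened before container_name was assigned. A condensed sketch of the pattern; the class name is hypothetical:

# Condensed sketch of the setup/teardown hardening pattern applied above; the
# class name is hypothetical.
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import test


class ExampleContainerTest(base.BaseObjectTest):

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        super(ExampleContainerTest, cls).setUpClass()
        cls.container_name = data_utils.rand_name(name="TestContainer")
        cls.container_client.create_container(cls.container_name)

    @classmethod
    def tearDownClass(cls):
        # container_name is missing if setUpClass failed before assigning it
        if hasattr(cls, "container_name"):
            cls.delete_containers([cls.container_name])
        super(ExampleContainerTest, cls).tearDownClass()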
diff --git a/tempest/api/object_storage/test_account_quotas_negative.py b/tempest/api/object_storage/test_account_quotas_negative.py
index 5a79529..6afd381 100644
--- a/tempest/api/object_storage/test_account_quotas_negative.py
+++ b/tempest/api/object_storage/test_account_quotas_negative.py
@@ -27,6 +27,7 @@
class AccountQuotasNegativeTest(base.BaseObjectTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(AccountQuotasNegativeTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -34,10 +35,7 @@
cls.data.setup_test_user()
- cls.os_reselleradmin = clients.Manager(
- cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
+ cls.os_reselleradmin = clients.Manager(cls.data.test_credentials)
# Retrieve the ResellerAdmin role id
reseller_role_id = None
@@ -50,14 +48,10 @@
raise exceptions.NotFound(msg)
# Retrieve the ResellerAdmin tenant id
- _, users = cls.os_admin.identity_client.get_users()
- reseller_user_id = next(usr['id'] for usr in users if usr['name']
- == cls.data.test_user)
+ reseller_user_id = cls.data.test_credentials.user_id
# Retrieve the ResellerAdmin tenant id
- _, tenants = cls.os_admin.identity_client.list_tenants()
- reseller_tenant_id = next(tnt['id'] for tnt in tenants if tnt['name']
- == cls.data.test_tenant)
+ reseller_tenant_id = cls.data.test_credentials.tenant_id
# Assign the newly created user the appropriate ResellerAdmin role
cls.os_admin.identity_client.assign_user_role(
@@ -100,7 +94,8 @@
@classmethod
def tearDownClass(cls):
- cls.delete_containers([cls.container_name])
+ if hasattr(cls, "container_name"):
+ cls.delete_containers([cls.container_name])
cls.data.teardown_all()
super(AccountQuotasNegativeTest, cls).tearDownClass()
diff --git a/tempest/api/object_storage/test_account_services.py b/tempest/api/object_storage/test_account_services.py
index 4b895d8..d615374 100644
--- a/tempest/api/object_storage/test_account_services.py
+++ b/tempest/api/object_storage/test_account_services.py
@@ -29,10 +29,13 @@
class AccountTest(base.BaseObjectTest):
+
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(AccountTest, cls).setUpClass()
- cls.containers = []
for i in moves.xrange(ord('a'), ord('f') + 1):
name = data_utils.rand_name(name='%s-' % chr(i))
cls.container_client.create_container(name)
@@ -64,9 +67,7 @@
self.data.setup_test_user()
os_test_user = clients.Manager(
- self.data.test_user,
- self.data.test_password,
- self.data.test_tenant)
+ self.data.test_credentials)
# Retrieve the id of an operator role of object storage
test_role_id = None
diff --git a/tempest/api/object_storage/test_account_services_negative.py b/tempest/api/object_storage/test_account_services_negative.py
index 71eaab5..490672d 100644
--- a/tempest/api/object_storage/test_account_services_negative.py
+++ b/tempest/api/object_storage/test_account_services_negative.py
@@ -17,20 +17,18 @@
from tempest.api.object_storage import base
from tempest import clients
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class AccountNegativeTest(base.BaseObjectTest):
- @attr(type=['negative', 'gate'])
+ @test.attr(type=['negative', 'gate'])
def test_list_containers_with_non_authorized_user(self):
# list containers using non-authorized user
# create user
self.data.setup_test_user()
- test_os = clients.Manager(self.data.test_user,
- self.data.test_password,
- self.data.test_tenant)
+ test_os = clients.Manager(self.data.test_credentials)
test_auth_provider = test_os.auth_provider
# Get auth for the test user
test_auth_provider.auth_data
diff --git a/tempest/api/object_storage/test_container_acl.py b/tempest/api/object_storage/test_container_acl.py
index c865ee1..fc51504 100644
--- a/tempest/api/object_storage/test_container_acl.py
+++ b/tempest/api/object_storage/test_container_acl.py
@@ -24,9 +24,7 @@
def setUpClass(cls):
super(ObjectTestACLs, cls).setUpClass()
cls.data.setup_test_user()
- test_os = clients.Manager(cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
+ test_os = clients.Manager(cls.data.test_credentials)
cls.test_auth_data = test_os.auth_provider.auth_data
@classmethod
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 547bf87..ca53876 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -26,9 +26,7 @@
def setUpClass(cls):
super(ObjectACLsNegativeTest, cls).setUpClass()
cls.data.setup_test_user()
- test_os = clients.Manager(cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
+ test_os = clients.Manager(cls.data.test_credentials)
cls.test_auth_data = test_os.auth_provider.auth_data
@classmethod
diff --git a/tempest/api/object_storage/test_container_staticweb.py b/tempest/api/object_storage/test_container_staticweb.py
index 6c71340..581c6d9 100644
--- a/tempest/api/object_storage/test_container_staticweb.py
+++ b/tempest/api/object_storage/test_container_staticweb.py
@@ -23,6 +23,7 @@
class StaticWebTest(base.BaseObjectTest):
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(StaticWebTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name="TestContainer")
@@ -45,7 +46,8 @@
@classmethod
def tearDownClass(cls):
- cls.delete_containers([cls.container_name])
+ if hasattr(cls, "container_name"):
+ cls.delete_containers([cls.container_name])
cls.data.teardown_all()
super(StaticWebTest, cls).tearDownClass()
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index 9bd986f..5f46d01 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -31,8 +31,10 @@
class ContainerSyncTest(base.BaseObjectTest):
+ clients = {}
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ContainerSyncTest, cls).setUpClass()
cls.containers = []
@@ -50,7 +52,6 @@
int(container_sync_timeout / cls.container_sync_interval)
# define container and object clients
- cls.clients = {}
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
(cls.container_client, cls.object_client)
cls.clients[data_utils.rand_name(name='TestContainerSync')] = \
@@ -66,6 +67,7 @@
super(ContainerSyncTest, cls).tearDownClass()
@test.attr(type='slow')
+ @test.skip_because(bug='1317133')
def test_container_synchronization(self):
# container to container synchronization
# to allow/accept sync requests to/from other accounts
diff --git a/tempest/api/object_storage/test_crossdomain.py b/tempest/api/object_storage/test_crossdomain.py
index 4f399b4..d1541b9 100644
--- a/tempest/api/object_storage/test_crossdomain.py
+++ b/tempest/api/object_storage/test_crossdomain.py
@@ -29,10 +29,7 @@
# endpoint and test the healthcheck feature.
cls.data.setup_test_user()
- cls.os_test_user = clients.Manager(
- cls.data.test_user,
- cls.data.test_password,
- cls.data.test_tenant)
+ cls.os_test_user = clients.Manager(cls.data.test_credentials)
cls.xml_start = '<?xml version="1.0"?>\n' \
'<!DOCTYPE cross-domain-policy SYSTEM ' \
diff --git a/tempest/api/object_storage/test_object_formpost.py b/tempest/api/object_storage/test_object_formpost.py
index 81db252..dc5585e 100644
--- a/tempest/api/object_storage/test_object_formpost.py
+++ b/tempest/api/object_storage/test_object_formpost.py
@@ -26,7 +26,11 @@
class ObjectFormPostTest(base.BaseObjectTest):
+ metadata = {}
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ObjectFormPostTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_object_formpost_negative.py b/tempest/api/object_storage/test_object_formpost_negative.py
index fe0c994..878bf6d 100644
--- a/tempest/api/object_storage/test_object_formpost_negative.py
+++ b/tempest/api/object_storage/test_object_formpost_negative.py
@@ -26,7 +26,11 @@
class ObjectFormPostNegativeTest(base.BaseObjectTest):
+ metadata = {}
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ObjectFormPostNegativeTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name='TestContainer')
diff --git a/tempest/api/object_storage/test_object_temp_url_negative.py b/tempest/api/object_storage/test_object_temp_url_negative.py
index cf24f66..7d26433 100644
--- a/tempest/api/object_storage/test_object_temp_url_negative.py
+++ b/tempest/api/object_storage/test_object_temp_url_negative.py
@@ -27,7 +27,11 @@
class ObjectTempUrlNegativeTest(base.BaseObjectTest):
+ metadata = {}
+ containers = []
+
@classmethod
+ @test.safe_setup
def setUpClass(cls):
super(ObjectTempUrlNegativeTest, cls).setUpClass()
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index c27bedf..446f4ab 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -30,7 +30,7 @@
@classmethod
def setUpClass(cls):
super(BaseOrchestrationTest, cls).setUpClass()
- cls.os = clients.OrchestrationManager()
+ cls.os = clients.Manager()
if not CONF.service_available.heat:
raise cls.skipException("Heat support is required")
cls.build_timeout = CONF.orchestration.build_timeout
@@ -41,12 +41,15 @@
cls.servers_client = cls.os.servers_client
cls.keypairs_client = cls.os.keypairs_client
cls.network_client = cls.os.network_client
+ cls.volumes_client = cls.os.volumes_client
+ cls.images_v2_client = cls.os.image_client_v2
cls.stacks = []
cls.keypairs = []
+ cls.images = []
@classmethod
def _get_default_network(cls):
- resp, networks = cls.network_client.list_networks()
+ __, networks = cls.network_client.list_networks()
for net in networks['networks']:
if net['name'] == CONF.compute.fixed_network_name:
return net
@@ -59,11 +62,14 @@
return admin_client
@classmethod
- def create_stack(cls, stack_name, template_data, parameters={}):
+ def create_stack(cls, stack_name, template_data, parameters={},
+ environment=None, files=None):
resp, body = cls.client.create_stack(
stack_name,
template=template_data,
- parameters=parameters)
+ parameters=parameters,
+ environment=environment,
+ files=files)
stack_id = resp['location'].split('/')[-1]
stack_identifier = '%s/%s' % (stack_name, stack_id)
cls.stacks.append(stack_identifier)
@@ -87,7 +93,7 @@
@classmethod
def _create_keypair(cls, name_start='keypair-heat-'):
kp_name = data_utils.rand_name(name_start)
- resp, body = cls.keypairs_client.create_keypair(kp_name)
+ __, body = cls.keypairs_client.create_keypair(kp_name)
cls.keypairs.append(kp_name)
return body
@@ -100,6 +106,25 @@
pass
@classmethod
+ def _create_image(cls, name_start='image-heat-', container_format='bare',
+ disk_format='iso'):
+ image_name = data_utils.rand_name(name_start)
+ __, body = cls.images_v2_client.create_image(image_name,
+ container_format,
+ disk_format)
+ image_id = body['id']
+ cls.images.append(image_id)
+ return body
+
+ @classmethod
+ def _clear_images(cls):
+ for image_id in cls.images:
+ try:
+ cls.images_v2_client.delete_image(image_id)
+ except exceptions.NotFound:
+ pass
+
+ @classmethod
def load_template(cls, name, ext='yaml'):
loc = ["stacks", "templates", "%s.%s" % (name, ext)]
fullpath = os.path.join(os.path.dirname(__file__), *loc)
@@ -112,6 +137,7 @@
def tearDownClass(cls):
cls._clear_stacks()
cls._clear_keypairs()
+ cls._clear_images()
super(BaseOrchestrationTest, cls).tearDownClass()
@staticmethod
@@ -136,3 +162,8 @@
return dict((r['resource_name'], r['resource_type'])
for r in resources)
+
+ def get_stack_output(self, stack_identifier, output_key):
+ resp, body = self.client.get_stack(stack_identifier)
+ self.assertEqual('200', resp['status'])
+ return self.stack_output(body, output_key)
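The extended create_stack() signature above lets a single call pass both an environment and a files mapping; the new test_environment.py below exercises exactly this. A brief usage sketch, where the stack name is illustrative and the template is the new random_string.yaml fixture:

# Usage sketch of the new environment/files arguments to create_stack(); the
# stack name is illustrative.
template = cls.load_template('random_string')
environment = {'parameters': {'random_length': 20}}
files = {'my_random.yaml': cls.load_template('random_string')}

stack_identifier = cls.create_stack('example-stack', template,
                                    environment=environment,
                                    files=files)
cls.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')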
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
new file mode 100644
index 0000000..3e03a30
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic.yaml
@@ -0,0 +1,24 @@
+heat_template_version: 2013-05-23
+
+resources:
+ volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: 1
+ description: a descriptive description
+
+outputs:
+ status:
+ description: status
+ value: { get_attr: ['volume', 'status'] }
+
+ size:
+ description: size
+ value: { get_attr: ['volume', 'size'] }
+
+ display_description:
+ description: display_description
+ value: { get_attr: ['volume', 'display_description'] }
+
+ volume_id:
+ value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
new file mode 100644
index 0000000..08e3da4
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/cinder_basic_delete_retain.yaml
@@ -0,0 +1,25 @@
+heat_template_version: 2013-05-23
+
+resources:
+ volume:
+ deletion_policy: 'Retain'
+ type: OS::Cinder::Volume
+ properties:
+ size: 1
+ description: a descriptive description
+
+outputs:
+ status:
+ description: status
+ value: { get_attr: ['volume', 'status'] }
+
+ size:
+ description: size
+ value: { get_attr: ['volume', 'size'] }
+
+ display_description:
+ description: display_description
+ value: { get_attr: ['volume', 'display_description'] }
+
+ volume_id:
+ value: { get_resource: volume }
diff --git a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
index 275d040..63b03f4 100644
--- a/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
+++ b/tempest/api/orchestration/stacks/templates/neutron_basic.yaml
@@ -36,7 +36,6 @@
admin_state_up: false
external_gateway_info:
network: {get_param: ExternalNetworkId}
- enable_snat: false
RouterInterface:
type: OS::Neutron::RouterInterface
properties:
diff --git a/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
index 58a934e..8690941 100644
--- a/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
+++ b/tempest/api/orchestration/stacks/templates/non_empty_stack.yaml
@@ -5,6 +5,8 @@
trigger:
Type: String
Default: not_yet
+ image:
+ Type: String
Resources:
fluffy:
Type: AWS::AutoScaling::LaunchConfiguration
@@ -13,7 +15,7 @@
- Tom
- Stinky
Properties:
- ImageId: not_used
+ ImageId: {Ref: image}
InstanceType: not_used
UserData:
Fn::Replace:
diff --git a/tempest/api/orchestration/stacks/templates/random_string.yaml b/tempest/api/orchestration/stacks/templates/random_string.yaml
new file mode 100644
index 0000000..dfd2342
--- /dev/null
+++ b/tempest/api/orchestration/stacks/templates/random_string.yaml
@@ -0,0 +1,18 @@
+heat_template_version: 2013-05-23
+
+parameters:
+ random_length:
+ type: number
+ default: 10
+
+resources:
+ random:
+ type: OS::Heat::RandomString
+ properties:
+ length: {get_param: random_length}
+
+outputs:
+ random_length:
+ value: {get_param: random_length}
+ random_value:
+ value: {get_attr: [random, value]}
diff --git a/tempest/api/orchestration/stacks/test_environment.py b/tempest/api/orchestration/stacks/test_environment.py
new file mode 100644
index 0000000..3911e72
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_environment.py
@@ -0,0 +1,93 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import test
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class StackEnvironmentTest(base.BaseOrchestrationTest):
+
+ @test.attr(type='gate')
+ def test_environment_parameter(self):
+ """Test passing a stack parameter via the environment."""
+ stack_name = data_utils.rand_name('heat')
+ template = self.load_template('random_string')
+ environment = {'parameters': {'random_length': 20}}
+
+ stack_identifier = self.create_stack(stack_name, template,
+ environment=environment)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ random_len = self.get_stack_output(stack_identifier, 'random_length')
+ self.assertEqual(20, random_len)
+
+ random_value = self.get_stack_output(stack_identifier, 'random_value')
+ self.assertEqual(20, len(random_value))
+
+ @test.attr(type='gate')
+ def test_environment_provider_resource(self):
+ """Test passing a resource_registry that defines a provider resource."""
+ stack_name = data_utils.rand_name('heat')
+ template = '''
+heat_template_version: 2013-05-23
+resources:
+ random:
+ type: My:Random::String
+outputs:
+ random_value:
+ value: {get_attr: [random, random_value]}
+'''
+ environment = {'resource_registry':
+ {'My:Random::String': 'my_random.yaml'}}
+ files = {'my_random.yaml': self.load_template('random_string')}
+
+ stack_identifier = self.create_stack(stack_name, template,
+ environment=environment,
+ files=files)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ # random_string.yaml specifies a length of 10
+ random_value = self.get_stack_output(stack_identifier, 'random_value')
+ self.assertEqual(10, len(random_value))
+
+ @test.attr(type='gate')
+ def test_files_provider_resource(self):
+ """Test defining a provider resource via "files" without a type alias."""
+ # It's also possible to specify the filename directly in the template
+ # without adding the type alias to resource_registry.
+ stack_name = data_utils.rand_name('heat')
+ template = '''
+heat_template_version: 2013-05-23
+resources:
+ random:
+ type: my_random.yaml
+outputs:
+ random_value:
+ value: {get_attr: [random, random_value]}
+'''
+ files = {'my_random.yaml': self.load_template('random_string')}
+
+ stack_identifier = self.create_stack(stack_name, template,
+ files=files)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ # random_string.yaml specifies a length of 10
+ random_value = self.get_stack_output(stack_identifier, 'random_value')
+ self.assertEqual(10, len(random_value))
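[Editor's note, not part of the patch] The template, environment, and files arguments used by these three tests are presumably folded into a single Heat stack-create request. A minimal sketch of that payload, assuming the Heat v1 stack-create body shape (the key names below come from the Heat API as generally documented, not from this diff):

    # Illustrative only: assumed shape of the stack-create request body.
    import json

    template_yaml = "heat_template_version: 2013-05-23\nresources: {}\n"
    provider_yaml = "heat_template_version: 2013-05-23\nresources: {}\n"

    body = {
        'stack_name': 'heat-example',                 # rand_name('heat') in the tests
        'template': template_yaml,                    # inline YAML or load_template() result
        'environment': {                              # parameter overrides / provider mapping
            'parameters': {'random_length': 20},
            'resource_registry': {'My:Random::String': 'my_random.yaml'},
        },
        'files': {'my_random.yaml': provider_yaml},   # referenced provider templates
    }
    print(json.dumps(body, indent=2))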
diff --git a/tempest/api/orchestration/stacks/test_limits.py b/tempest/api/orchestration/stacks/test_limits.py
index 283ab2b..8ee62ab 100644
--- a/tempest/api/orchestration/stacks/test_limits.py
+++ b/tempest/api/orchestration/stacks/test_limits.py
@@ -16,7 +16,7 @@
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
CONF = config.CONF
@@ -25,7 +25,7 @@
class TestServerStackLimits(base.BaseOrchestrationTest):
- @attr(type='gate')
+ @test.attr(type='gate')
def test_exceed_max_template_size_fails(self):
stack_name = data_utils.rand_name('heat')
fill = 'A' * CONF.orchestration.max_template_size
@@ -38,7 +38,7 @@
stack_name, template)
self.assertIn('Template exceeds maximum allowed size', str(ex))
- @attr(type='gate')
+ @test.attr(type='gate')
def test_exceed_max_resources_per_stack(self):
stack_name = data_utils.rand_name('heat')
# Create a big template, one resource more than the limit
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index b96f6ce..3086d78 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -137,8 +137,6 @@
self.assertEqual('NewRouter', router['name'])
self.assertEqual(self.external_network_id,
router['external_gateway_info']['network_id'])
- self.assertEqual(False,
- router['external_gateway_info']['enable_snat'])
self.assertEqual(False, router['admin_state_up'])
@test.attr(type='slow')
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 9ef95a1..585c90b 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -14,8 +14,10 @@
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import config
+from tempest import test
+CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -27,13 +29,15 @@
super(StacksTestJSON, cls).setUpClass()
cls.stack_name = data_utils.rand_name('heat')
template = cls.load_template('non_empty_stack')
-
+ image_id = (CONF.orchestration.image_ref or
+ cls._create_image()['id'])
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
template,
parameters={
- 'trigger': 'start'
+ 'trigger': 'start',
+ 'image': image_id
})
cls.stack_id = cls.stack_identifier.split('/')[1]
cls.resource_name = 'fluffy'
@@ -48,14 +52,14 @@
self.assertEqual(expected_num, len(stacks))
return stacks
- @attr(type='gate')
+ @test.attr(type='gate')
def test_stack_list(self):
"""Created stack should be in the list of existing stacks."""
stacks = self._list_stacks()
stacks_names = map(lambda stack: stack['stack_name'], stacks)
self.assertIn(self.stack_name, stacks_names)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_stack_show(self):
"""Getting details about created stack should be possible."""
resp, stack = self.client.get_stack(self.stack_name)
@@ -75,7 +79,7 @@
self.assertEqual(self.stack_id, stack['id'])
self.assertEqual('fluffy', stack['outputs'][0]['output_key'])
- @attr(type='gate')
+ @test.attr(type='gate')
def test_suspend_resume_stack(self):
"""Suspend and resume a stack."""
resp, suspend_stack = self.client.suspend_stack(self.stack_identifier)
@@ -87,14 +91,14 @@
self.client.wait_for_stack_status(self.stack_identifier,
'RESUME_COMPLETE')
- @attr(type='gate')
+ @test.attr(type='gate')
def test_list_resources(self):
"""Getting list of created resources for the stack should be possible.
"""
resources = self.list_resources(self.stack_identifier)
self.assertEqual({self.resource_name: self.resource_type}, resources)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_show_resource(self):
"""Getting details about created resource should be possible."""
resp, resource = self.client.get_resource(self.stack_identifier,
@@ -108,7 +112,7 @@
self.assertEqual(self.resource_name, resource['logical_resource_id'])
self.assertEqual(self.resource_type, resource['resource_type'])
- @attr(type='gate')
+ @test.attr(type='gate')
def test_resource_metadata(self):
"""Getting metadata for created resources should be possible."""
resp, metadata = self.client.show_resource_metadata(
@@ -118,7 +122,7 @@
self.assertIsInstance(metadata, dict)
self.assertEqual(['Tom', 'Stinky'], metadata.get('kittens', None))
- @attr(type='gate')
+ @test.attr(type='gate')
def test_list_events(self):
"""Getting list of created events for the stack should be possible."""
resp, events = self.client.list_events(self.stack_identifier)
@@ -134,7 +138,7 @@
self.assertIn('CREATE_IN_PROGRESS', resource_statuses)
self.assertIn('CREATE_COMPLETE', resource_statuses)
- @attr(type='gate')
+ @test.attr(type='gate')
def test_show_event(self):
"""Getting details about an event should be possible."""
resp, events = self.client.list_resource_events(self.stack_identifier,
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index 60b8dc1..cb70d07 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -15,7 +15,7 @@
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
-from tempest.test import attr
+from tempest import test
LOG = logging.getLogger(__name__)
@@ -46,7 +46,7 @@
for resource in resources:
cls.test_resources[resource['logical_resource_id']] = resource
- @attr(type='slow')
+ @test.attr(type='slow')
def test_created_resources(self):
"""Verifies created keypair resource."""
resources = [('KeyPairSavePrivate', 'OS::Nova::KeyPair'),
@@ -59,7 +59,7 @@
self.assertEqual(resource_type, resource['resource_type'])
self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
- @attr(type='slow')
+ @test.attr(type='slow')
def test_stack_keypairs_output(self):
resp, stack = self.client.get_stack(self.stack_name)
self.assertEqual('200', resp['status'])
@@ -70,13 +70,13 @@
output_map[outputs['output_key']] = outputs['output_value']
#Test that first key generated public and private keys
self.assertTrue('KeyPair_PublicKey' in output_map)
- self.assertTrue("Generated by" in output_map['KeyPair_PublicKey'])
+ self.assertTrue("Generated" in output_map['KeyPair_PublicKey'])
self.assertTrue('KeyPair_PrivateKey' in output_map)
self.assertTrue('-----BEGIN' in output_map['KeyPair_PrivateKey'])
#Test that second key generated public key, and private key is not
#in the output due to save_private_key = false
self.assertTrue('KeyPairDontSavePrivate_PublicKey' in output_map)
- self.assertTrue('Generated by' in
+ self.assertTrue('Generated' in
output_map['KeyPairDontSavePrivate_PublicKey'])
self.assertTrue(u'KeyPairDontSavePrivate_PrivateKey' in output_map)
private_key = output_map['KeyPairDontSavePrivate_PrivateKey']
diff --git a/tempest/api/orchestration/stacks/test_server_cfn_init.py b/tempest/api/orchestration/stacks/test_server_cfn_init.py
deleted file mode 100644
index cb5d941..0000000
--- a/tempest/api/orchestration/stacks/test_server_cfn_init.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import testtools
-
-from tempest.api.orchestration import base
-from tempest.common.utils import data_utils
-from tempest.common.utils.linux import remote_client
-from tempest import config
-from tempest import exceptions
-from tempest.openstack.common import log as logging
-from tempest import test
-
-CONF = config.CONF
-LOG = logging.getLogger(__name__)
-
-
-class ServerCfnInitTestJSON(base.BaseOrchestrationTest):
- existing_keypair = CONF.orchestration.keypair_name is not None
-
- @classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(ServerCfnInitTestJSON, cls).setUpClass()
- if not CONF.orchestration.image_ref:
- raise cls.skipException("No image available to test")
- template = cls.load_template('cfn_init_signal')
- stack_name = data_utils.rand_name('heat')
- if CONF.orchestration.keypair_name:
- keypair_name = CONF.orchestration.keypair_name
- else:
- cls.keypair = cls._create_keypair()
- keypair_name = cls.keypair['name']
-
- # create the stack
- cls.stack_identifier = cls.create_stack(
- stack_name,
- template,
- parameters={
- 'key_name': keypair_name,
- 'flavor': CONF.orchestration.instance_type,
- 'image': CONF.orchestration.image_ref,
- 'network': cls._get_default_network()['id'],
- 'timeout': CONF.orchestration.build_timeout
- })
-
- @test.attr(type='slow')
- @testtools.skipIf(existing_keypair, 'Server ssh tests are disabled.')
- def test_can_log_into_created_server(self):
-
- sid = self.stack_identifier
- rid = 'SmokeServer'
-
- # wait for create to complete.
- self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
-
- resp, body = self.client.get_resource(sid, rid)
- self.assertEqual('CREATE_COMPLETE', body['resource_status'])
-
- # fetch the IP address from servers client, since we can't get it
- # from the stack until stack create is complete
- resp, server = self.servers_client.get_server(
- body['physical_resource_id'])
-
- # Check that the user can authenticate with the generated password
- linux_client = remote_client.RemoteClient(server, 'ec2-user',
- pkey=self.keypair[
- 'private_key'])
- linux_client.validate_authentication()
-
- @test.attr(type='slow')
- def test_all_resources_created(self):
- sid = self.stack_identifier
- self.client.wait_for_resource_status(
- sid, 'WaitHandle', 'CREATE_COMPLETE')
- self.client.wait_for_resource_status(
- sid, 'SmokeSecurityGroup', 'CREATE_COMPLETE')
- self.client.wait_for_resource_status(
- sid, 'SmokeKeys', 'CREATE_COMPLETE')
- self.client.wait_for_resource_status(
- sid, 'CfnUser', 'CREATE_COMPLETE')
- self.client.wait_for_resource_status(
- sid, 'SmokeServer', 'CREATE_COMPLETE')
- try:
- self.client.wait_for_resource_status(
- sid, 'WaitCondition', 'CREATE_COMPLETE')
- except (exceptions.StackResourceBuildErrorException,
- exceptions.TimeoutException) as e:
- # attempt to log the server console to help with debugging
- # the cause of the server not signalling the waitcondition
- # to heat.
- resp, body = self.client.get_resource(sid, 'SmokeServer')
- server_id = body['physical_resource_id']
- LOG.debug('Console output for %s', server_id)
- resp, output = self.servers_client.get_console_output(
- server_id, None)
- LOG.debug(output)
- raise e
-
- # wait for create to complete.
- self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
-
- # fetch the stack
- resp, body = self.client.get_stack(sid)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
- # fetch the stack
- resp, body = self.client.get_stack(sid)
- self.assertEqual('CREATE_COMPLETE', body['stack_status'])
-
- # This is an assert of great significance, as it means the following
- # has happened:
- # - cfn-init read the provided metadata and wrote out a file
- # - a user was created and credentials written to the server
- # - a cfn-signal was built which was signed with provided credentials
- # - the wait condition was fulfilled and the stack has changed state
- wait_status = json.loads(
- self.stack_output(body, 'WaitConditionStatus'))
- self.assertEqual('smoke test complete', wait_status['00000'])
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index 867995c..5b45d82 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -13,7 +13,7 @@
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest.openstack.common import log as logging
-from tempest.test import attr
+from tempest import test
LOG = logging.getLogger(__name__)
@@ -26,13 +26,13 @@
def setUpClass(cls):
super(StacksTestJSON, cls).setUpClass()
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_stack_list_responds(self):
resp, stacks = self.client.list_stacks()
self.assertEqual('200', resp['status'])
self.assertIsInstance(stacks, list)
- @attr(type='smoke')
+ @test.attr(type='smoke')
def test_stack_crud_no_resources(self):
stack_name = data_utils.rand_name('heat')
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
new file mode 100644
index 0000000..2544c41
--- /dev/null
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -0,0 +1,101 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from tempest.api.orchestration import base
+from tempest.common.utils import data_utils
+from tempest import config
+from tempest import exceptions
+from tempest import test
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class CinderResourcesTest(base.BaseOrchestrationTest):
+
+ @classmethod
+ def setUpClass(cls):
+ super(CinderResourcesTest, cls).setUpClass()
+ if not CONF.service_available.cinder:
+ raise cls.skipException('Cinder support is required')
+
+ def _cinder_verify(self, volume_id):
+ self.assertIsNotNone(volume_id)
+ resp, volume = self.volumes_client.get_volume(volume_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual('available', volume.get('status'))
+ self.assertEqual(1, volume.get('size'))
+ self.assertEqual('a descriptive description',
+ volume.get('display_description'))
+
+ def _outputs_verify(self, stack_identifier):
+ self.assertEqual('available',
+ self.get_stack_output(stack_identifier, 'status'))
+ self.assertEqual('1',
+ self.get_stack_output(stack_identifier, 'size'))
+ self.assertEqual('a descriptive description',
+ self.get_stack_output(stack_identifier,
+ 'display_description'))
+
+ @test.attr(type='gate')
+ def test_cinder_volume_create_delete(self):
+ """Create and delete a volume via OS::Cinder::Volume."""
+ stack_name = data_utils.rand_name('heat')
+ template = self.load_template('cinder_basic')
+ stack_identifier = self.create_stack(stack_name, template)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ # Verify with cinder that the volume exists, with matching details
+ volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+ self._cinder_verify(volume_id)
+
+ # Verify the stack outputs are as expected
+ self._outputs_verify(stack_identifier)
+
+ # Delete the stack and ensure the volume is gone
+ self.client.delete_stack(stack_identifier)
+ self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+ self.assertRaises(exceptions.NotFound,
+ self.volumes_client.get_volume,
+ volume_id)
+
+ def _cleanup_volume(self, volume_id):
+ """Cleanup the volume direct with cinder."""
+ resp = self.volumes_client.delete_volume(volume_id)
+ self.assertEqual(202, resp[0].status)
+ self.volumes_client.wait_for_resource_deletion(volume_id)
+
+ @test.attr(type='gate')
+ def test_cinder_volume_create_delete_retain(self):
+ """Ensure the 'Retain' deletion policy is respected."""
+ stack_name = data_utils.rand_name('heat')
+ template = self.load_template('cinder_basic_delete_retain')
+ stack_identifier = self.create_stack(stack_name, template)
+ self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
+
+ # Verify with cinder that the volume exists, with matching details
+ volume_id = self.get_stack_output(stack_identifier, 'volume_id')
+ self.addCleanup(self._cleanup_volume, volume_id)
+ self._cinder_verify(volume_id)
+
+ # Verify the stack outputs are as expected
+ self._outputs_verify(stack_identifier)
+
+ # Delete the stack and ensure the volume is *not* gone
+ self.client.delete_stack(stack_identifier)
+ self.client.wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
+ self._cinder_verify(volume_id)
+
+ # Volume cleanup happens via addCleanup calling _cleanup_volume
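[Editor's note, not part of the patch] The 'cinder_basic' and 'cinder_basic_delete_retain' templates are not included in this hunk. Based on the assertions above (size 1, the fixed description, and the volume_id/status/size/display_description outputs), a plausible minimal shape is sketched below as a Python string, purely for illustration; the real templates live with the other test templates and may differ:

    # Hypothetical template body consistent with _cinder_verify/_outputs_verify.
    CINDER_BASIC_GUESS = """
    heat_template_version: 2013-05-23
    resources:
      volume:
        type: OS::Cinder::Volume
        properties:
          size: 1
          description: a descriptive description
    outputs:
      volume_id:
        value: {get_resource: volume}
      status:
        value: {get_attr: [volume, status]}
      size:
        value: {get_attr: [volume, size]}
      display_description:
        value: {get_attr: [volume, display_description]}
    """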
diff --git a/tempest/api/queuing/base.py b/tempest/api/queuing/base.py
index 6c22719..5649619 100644
--- a/tempest/api/queuing/base.py
+++ b/tempest/api/queuing/base.py
@@ -50,6 +50,42 @@
@classmethod
def delete_queue(cls, queue_name):
- """Wrapper utility that returns a test queue."""
+ """Wrapper utility that deletes a test queue."""
resp, body = cls.client.delete_queue(queue_name)
return resp, body
+
+ @classmethod
+ def check_queue_exists(cls, queue_name):
+ """Wrapper utility that checks the existence of a test queue."""
+ resp, body = cls.client.get_queue(queue_name)
+ return resp, body
+
+ @classmethod
+ def check_queue_exists_head(cls, queue_name):
+ """Wrapper utility checks the head of a queue via http HEAD."""
+ resp, body = cls.client.head_queue(queue_name)
+ return resp, body
+
+ @classmethod
+ def list_queues(cls):
+ """Wrapper utility that lists queues."""
+ resp, body = cls.client.list_queues()
+ return resp, body
+
+ @classmethod
+ def get_queue_stats(cls, queue_name):
+ """Wrapper utility that returns the queue stats."""
+ resp, body = cls.client.get_queue_stats(queue_name)
+ return resp, body
+
+ @classmethod
+ def get_queue_metadata(cls, queue_name):
+ """Wrapper utility that gets a queue metadata."""
+ resp, body = cls.client.get_queue_metadata(queue_name)
+ return resp, body
+
+ @classmethod
+ def set_queue_metadata(cls, queue_name, rbody):
+ """Wrapper utility that sets the metadata of a queue."""
+ resp, body = cls.client.set_queue_metadata(queue_name, rbody)
+ return resp, body
diff --git a/tempest/api/queuing/test_queues.py b/tempest/api/queuing/test_queues.py
index 4d03f7e..e43178a 100644
--- a/tempest/api/queuing/test_queues.py
+++ b/tempest/api/queuing/test_queues.py
@@ -14,6 +14,8 @@
# limitations under the License.
import logging
+from six import moves
+from testtools import matchers
from tempest.api.queuing import base
from tempest.common.utils import data_utils
@@ -43,18 +45,86 @@
@classmethod
def setUpClass(cls):
super(TestManageQueue, cls).setUpClass()
- cls.queue_name = data_utils.rand_name('Queues-Test')
- # Create Queue
- cls.client.create_queue(cls.queue_name)
+ cls.queues = list()
+ for _ in moves.xrange(5):
+ queue_name = data_utils.rand_name('Queues-Test')
+ cls.queues.append(queue_name)
+ # Create Queue
+ cls.client.create_queue(queue_name)
@test.attr(type='smoke')
def test_delete_queue(self):
# Delete Queue
- resp, body = self.delete_queue(self.queue_name)
+ queue_name = self.queues.pop()
+ resp, body = self.delete_queue(queue_name)
self.assertEqual('204', resp['status'])
self.assertEqual('', body)
+ @test.attr(type='smoke')
+ def test_check_queue_existence(self):
+ # Checking Queue Existence
+ for queue_name in self.queues:
+ resp, body = self.check_queue_exists(queue_name)
+ self.assertEqual('204', resp['status'])
+ self.assertEqual('', body)
+
+ @test.attr(type='smoke')
+ def test_check_queue_head(self):
+ # Checking Queue Existence by calling HEAD
+ for queue_name in self.queues:
+ resp, body = self.check_queue_exists_head(queue_name)
+ self.assertEqual('204', resp['status'])
+ self.assertEqual('', body)
+
+ @test.attr(type='smoke')
+ def test_list_queues(self):
+ # Listing queues
+ resp, body = self.list_queues()
+ self.assertEqual(len(body['queues']), len(self.queues))
+ for item in body['queues']:
+ self.assertIn(item['name'], self.queues)
+
+ @test.attr(type='smoke')
+ def test_get_queue_stats(self):
+ # Retrieve random queue
+ queue_name = self.queues[data_utils.rand_int_id(0,
+ len(self.queues) - 1)]
+ # Get Queue Stats for a newly created Queue
+ resp, body = self.get_queue_stats(queue_name)
+ msgs = body['messages']
+ for element in ('free', 'claimed', 'total'):
+ self.assertEqual(0, msgs[element])
+ for element in ('oldest', 'newest'):
+ self.assertNotIn(element, msgs)
+
+ @test.attr(type='smoke')
+ def test_set_and_get_queue_metadata(self):
+ # Retrieve random queue
+ queue_name = self.queues[data_utils.rand_int_id(0,
+ len(self.queues) - 1)]
+ # Check the Queue has no metadata
+ resp, body = self.get_queue_metadata(queue_name)
+ self.assertEqual('200', resp['status'])
+ self.assertThat(body, matchers.HasLength(0))
+ # Create metadata
+ key3 = [0, 1, 2, 3, 4]
+ key2 = data_utils.rand_name('value')
+ req_body1 = dict()
+ req_body1[data_utils.rand_name('key3')] = key3
+ req_body1[data_utils.rand_name('key2')] = key2
+ req_body = dict()
+ req_body[data_utils.rand_name('key1')] = req_body1
+ # Set Queue Metadata
+ resp, body = self.set_queue_metadata(queue_name, req_body)
+ self.assertEqual('204', resp['status'])
+ self.assertEqual('', body)
+ # Get Queue Metadata
+ resp, body = self.get_queue_metadata(queue_name)
+ self.assertEqual('200', resp['status'])
+ self.assertThat(body, matchers.Equals(req_body))
+
@classmethod
def tearDownClass(cls):
- cls.client.delete_queue(cls.queue_name)
+ for queue_name in cls.queues:
+ cls.client.delete_queue(queue_name)
super(TestManageQueue, cls).tearDownClass()
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index c4614c6..2b422fd 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -10,9 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
+from tempest.openstack.common import timeutils
import tempest.test
CONF = config.CONF
@@ -29,6 +32,12 @@
super(BaseTelemetryTest, cls).setUpClass()
os = cls.get_client_manager()
cls.telemetry_client = os.telemetry_client
+ cls.servers_client = os.servers_client
+ cls.flavors_client = os.flavors_client
+
+ cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size',
+ 'disk.ephemeral.size']
+ cls.server_ids = []
cls.alarm_ids = []
@classmethod
@@ -41,11 +50,46 @@
return resp, body
@classmethod
- def tearDownClass(cls):
- for alarm_id in cls.alarm_ids:
+ def create_server(cls):
+ resp, body = cls.servers_client.create_server(
+ data_utils.rand_name('ceilometer-instance'),
+ CONF.compute.image_ref, CONF.compute.flavor_ref,
+ wait_until='ACTIVE')
+ if resp['status'] == '202':
+ cls.server_ids.append(body['id'])
+ return resp, body
+
+ @staticmethod
+ def cleanup_resources(method, list_of_ids):
+ for resource_id in list_of_ids:
try:
- cls.telemetry_client.delete_alarm(alarm_id)
+ method(resource_id)
except exceptions.NotFound:
pass
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.cleanup_resources(cls.telemetry_client.delete_alarm, cls.alarm_ids)
+ cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
cls.clear_isolated_creds()
super(BaseTelemetryTest, cls).tearDownClass()
+
+ def await_samples(self, metric, query):
+ """
+ This method is to wait for sample to add it to database.
+ There are long time delays when using Postgresql (or Mysql)
+ database as ceilometer backend
+ """
+ timeout = CONF.compute.build_timeout
+ start = timeutils.utcnow()
+ while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout:
+ resp, body = self.telemetry_client.list_samples(metric, query)
+ self.assertEqual(resp.status, 200)
+ if body:
+ return resp, body
+ time.sleep(CONF.compute.build_interval)
+
+ raise exceptions.TimeoutException(
+ 'Sample for metric:%s with query:%s has not been added to the '
+ 'database within %d seconds' % (metric, query,
+ CONF.compute.build_timeout))
diff --git a/tempest/api/telemetry/test_telemetry_alarming_api.py b/tempest/api/telemetry/test_telemetry_alarming_api.py
index a59d3ae..95758e8 100644
--- a/tempest/api/telemetry/test_telemetry_alarming_api.py
+++ b/tempest/api/telemetry/test_telemetry_alarming_api.py
@@ -11,53 +11,104 @@
# under the License.
from tempest.api.telemetry import base
+from tempest.common.utils import data_utils
from tempest import exceptions
-from tempest.test import attr
+from tempest import test
class TelemetryAlarmingAPITestJSON(base.BaseTelemetryTest):
_interface = 'json'
- @attr(type="gate")
- def test_alarm_list(self):
- # Create an alarm to verify in the list of alarms
- created_alarm_ids = list()
- fetched_ids = list()
- rules = {'meter_name': 'cpu_util',
- 'comparison_operator': 'gt',
- 'threshold': 80.0,
- 'period': 70}
- for i in range(3):
- resp, body = self.create_alarm(threshold_rule=rules)
- created_alarm_ids.append(body['alarm_id'])
+ @classmethod
+ def setUpClass(cls):
+ super(TelemetryAlarmingAPITestJSON, cls).setUpClass()
+ cls.rule = {'meter_name': 'cpu_util',
+ 'comparison_operator': 'gt',
+ 'threshold': 80.0,
+ 'period': 70}
+ for i in range(2):
+ cls.create_alarm(threshold_rule=cls.rule)
+ @test.attr(type="gate")
+ def test_alarm_list(self):
# List alarms
resp, alarm_list = self.telemetry_client.list_alarms()
- self.assertEqual(int(resp['status']), 200)
+ self.assertEqual(200, resp.status)
# Verify created alarm in the list
fetched_ids = [a['alarm_id'] for a in alarm_list]
- missing_alarms = [a for a in created_alarm_ids if a not in fetched_ids]
+ missing_alarms = [a for a in self.alarm_ids if a not in fetched_ids]
self.assertEqual(0, len(missing_alarms),
"Failed to find the following created alarm(s)"
" in a fetched list: %s" %
', '.join(str(a) for a in missing_alarms))
- @attr(type="gate")
- def test_create_alarm(self):
- rules = {'meter_name': 'cpu_util',
- 'comparison_operator': 'gt',
- 'threshold': 80.0,
- 'period': 70}
- resp, body = self.create_alarm(threshold_rule=rules)
- self.alarm_id = body['alarm_id']
- self.assertEqual(int(resp['status']), 201)
- self.assertDictContainsSubset(rules, body['threshold_rule'])
- resp, body = self.telemetry_client.get_alarm(self.alarm_id)
- self.assertEqual(int(resp['status']), 200)
- self.assertDictContainsSubset(rules, body['threshold_rule'])
- resp, _ = self.telemetry_client.delete_alarm(self.alarm_id)
- self.assertEqual(int(resp['status']), 204)
+ @test.attr(type="gate")
+ def test_create_update_get_delete_alarm(self):
+ # Create an alarm
+ alarm_name = data_utils.rand_name('telemetry_alarm')
+ resp, body = self.telemetry_client.create_alarm(
+ name=alarm_name, type='threshold', threshold_rule=self.rule)
+ self.assertEqual(201, resp.status)
+ self.assertEqual(alarm_name, body['name'])
+ alarm_id = body['alarm_id']
+ self.assertDictContainsSubset(self.rule, body['threshold_rule'])
+ # Update alarm with new rule and new name
+ new_rule = {'meter_name': 'cpu',
+ 'comparison_operator': 'eq',
+ 'threshold': 70.0,
+ 'period': 60}
+ alarm_name = data_utils.rand_name('telemetry-alarm-update')
+ resp, body = self.telemetry_client.update_alarm(
+ alarm_id,
+ threshold_rule=new_rule,
+ name=alarm_name,
+ type='threshold')
+ self.assertEqual(200, resp.status)
+ self.assertEqual(alarm_name, body['name'])
+ self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+ # Get and verify details of an alarm after update
+ resp, body = self.telemetry_client.get_alarm(alarm_id)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(alarm_name, body['name'])
+ self.assertDictContainsSubset(new_rule, body['threshold_rule'])
+ # Delete alarm and verify if deleted
+ resp, _ = self.telemetry_client.delete_alarm(alarm_id)
+ self.assertEqual(204, resp.status)
self.assertRaises(exceptions.NotFound,
- self.telemetry_client.get_alarm,
- self.alarm_id)
+ self.telemetry_client.get_alarm, alarm_id)
+
+ @test.attr(type="gate")
+ def test_set_get_alarm_state(self):
+ alarm_states = ['ok', 'alarm', 'insufficient data']
+ _, alarm = self.create_alarm(threshold_rule=self.rule)
+ # Set alarm state and verify
+ new_state =\
+ [elem for elem in alarm_states if elem != alarm['state']][0]
+ resp, state = self.telemetry_client.alarm_set_state(alarm['alarm_id'],
+ new_state)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(new_state, state)
+ # Get alarm state and verify
+ resp, state = self.telemetry_client.alarm_get_state(alarm['alarm_id'])
+ self.assertEqual(200, resp.status)
+ self.assertEqual(new_state, state)
+
+ @test.attr(type="gate")
+ def test_create_delete_alarm_with_combination_rule(self):
+ rule = {"alarm_ids": self.alarm_ids,
+ "operator": "or"}
+ # Verifies alarm create
+ alarm_name = data_utils.rand_name('combination_alarm')
+ resp, body = self.telemetry_client.create_alarm(name=alarm_name,
+ combination_rule=rule,
+ type='combination')
+ self.assertEqual(201, resp.status)
+ self.assertEqual(alarm_name, body['name'])
+ alarm_id = body['alarm_id']
+ self.assertDictContainsSubset(rule, body['combination_rule'])
+ # Verify alarm delete
+ resp, _ = self.telemetry_client.delete_alarm(alarm_id)
+ self.assertEqual(204, resp.status)
+ self.assertRaises(exceptions.NotFound,
+ self.telemetry_client.get_alarm, alarm_id)
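[Editor's note, not part of the patch] For readers unfamiliar with combination alarms: the rule built in the last test is presumably serialized into the alarm-create request roughly as sketched below. The body shape follows the Ceilometer v2 alarms API as commonly documented; treat it as an assumption rather than something shown in this diff:

    # Illustrative request body for a combination alarm (assumed shape).
    import json

    alarm_body = {
        'name': 'combination_alarm-12345',
        'type': 'combination',
        'combination_rule': {
            'alarm_ids': ['<threshold-alarm-id-1>', '<threshold-alarm-id-2>'],
            'operator': 'or',   # fire if any member alarm is in the 'alarm' state
        },
    }
    print(json.dumps(alarm_body, indent=2))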
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
new file mode 100644
index 0000000..148f5a3
--- /dev/null
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -0,0 +1,47 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+
+from tempest.api.telemetry import base
+from tempest import config
+from tempest import test
+
+CONF = config.CONF
+
+
+class TelemetryNotificationAPITestJSON(base.BaseTelemetryTest):
+ _interface = 'json'
+
+ @classmethod
+ def setUpClass(cls):
+ if CONF.telemetry.too_slow_to_test:
+ raise cls.skipException("Ceilometer feature for fast work mysql "
+ "is disabled")
+ super(TelemetryNotificationAPITestJSON, cls).setUpClass()
+
+ @test.attr(type="gate")
+ @testtools.skipIf(not CONF.service_available.nova,
+ "Nova is not available.")
+ def test_check_nova_notification(self):
+
+ resp, body = self.create_server()
+ self.assertEqual(resp.status, 202)
+
+ query = ('resource', 'eq', body['id'])
+
+ for metric in self.nova_notifications:
+ self.await_samples(metric, query)
+
+
+class TelemetryNotificationAPITestXML(TelemetryNotificationAPITestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index 2949d56..ecd8836 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -15,6 +15,7 @@
# under the License.
from tempest.api.volume import base
+from tempest.common.utils import data_utils
from tempest import test
QUOTA_KEYS = ['gigabytes', 'snapshots', 'volumes']
@@ -29,8 +30,7 @@
def setUpClass(cls):
super(VolumeQuotasAdminTestJSON, cls).setUpClass()
cls.admin_volume_client = cls.os_adm.volumes_client
- cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
- 'tenantId')
+ cls.demo_tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
@test.attr(type='gate')
def test_list_quotas(self):
@@ -100,6 +100,27 @@
self.assertEqual(quota_usage['gigabytes']['in_use'] + 1,
new_quota_usage['gigabytes']['in_use'])
+ @test.attr(type='gate')
+ def test_delete_quota(self):
+ # Admin can delete the resource quota set for a tenant
+ tenant_name = data_utils.rand_name('quota_tenant_')
+ identity_client = self.os_adm.identity_client
+ tenant = identity_client.create_tenant(tenant_name)[1]
+ tenant_id = tenant['id']
+ self.addCleanup(identity_client.delete_tenant, tenant_id)
+ _, quota_set_default = self.quotas_client.get_default_quota_set(
+ tenant_id)
+ volume_default = quota_set_default['volumes']
+
+ self.quotas_client.update_quota_set(tenant_id,
+ volumes=(int(volume_default) + 5))
+
+ resp, _ = self.quotas_client.delete_quota_set(tenant_id)
+ self.assertEqual(200, resp.status)
+
+ _, quota_set_new = self.quotas_client.get_quota_set(tenant_id)
+ self.assertEqual(volume_default, quota_set_new['volumes'])
+
class VolumeQuotasAdminTestXML(VolumeQuotasAdminTestJSON):
_interface = "xml"
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
index 292f8ed..ab88b90 100644
--- a/tempest/api/volume/admin/test_volume_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -26,8 +26,8 @@
@test.safe_setup
def setUpClass(cls):
super(VolumeQuotasNegativeTestJSON, cls).setUpClass()
- demo_user = cls.isolated_creds.get_primary_user()
- cls.demo_tenant_id = demo_user.get('tenantId')
+ demo_user = cls.isolated_creds.get_primary_creds()
+ cls.demo_tenant_id = demo_user.tenant_id
cls.shared_quota_set = {'gigabytes': 3, 'volumes': 1, 'snapshots': 1}
# NOTE(gfidente): no need to restore original quota set
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
new file mode 100644
index 0000000..012c231
--- /dev/null
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -0,0 +1,74 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api.volume import base
+from tempest import test
+
+
+class VolumesServicesTestJSON(base.BaseVolumeV1AdminTest):
+ """
+ Tests Volume Services API.
+ Volume service list requires admin privileges.
+ """
+ _interface = "json"
+
+ @classmethod
+ def setUpClass(cls):
+ super(VolumesServicesTestJSON, cls).setUpClass()
+ cls.client = cls.os_adm.volume_services_client
+ resp, cls.services = cls.client.list_services()
+ cls.host_name = cls.services[0]['host']
+ cls.binary_name = cls.services[0]['binary']
+
+ @test.attr(type='gate')
+ def test_list_services(self):
+ resp, services = self.client.list_services()
+ self.assertEqual(200, resp.status)
+ self.assertNotEqual(0, len(services))
+
+ @test.attr(type='gate')
+ def test_get_service_by_service_binary_name(self):
+ params = {'binary': self.binary_name}
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertNotEqual(0, len(services))
+ for service in services:
+ self.assertEqual(self.binary_name, service['binary'])
+
+ @test.attr(type='gate')
+ def test_get_service_by_host_name(self):
+ services_on_host = [service for service in self.services if
+ service['host'] == self.host_name]
+ params = {'host': self.host_name}
+
+ resp, services = self.client.list_services(params)
+
+ # a periodic job could check in between the two service
+ # lookups, so only compare the binary lists.
+ s1 = map(lambda x: x['binary'], services)
+ s2 = map(lambda x: x['binary'], services_on_host)
+ # sort the lists before comparing, to take out dependency
+ # on order.
+ self.assertEqual(sorted(s1), sorted(s2))
+
+ @test.attr(type='gate')
+ def test_get_service_by_service_and_host_name(self):
+ params = {'host': self.host_name, 'binary': self.binary_name}
+
+ resp, services = self.client.list_services(params)
+ self.assertEqual(200, resp.status)
+ self.assertEqual(1, len(services))
+ self.assertEqual(self.host_name, services[0]['host'])
+ self.assertEqual(self.binary_name, services[0]['binary'])
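[Editor's note, not part of the patch] The list_services(params) calls above presumably map to GET requests on Cinder's os-services extension with the filters passed as query parameters. A rough sketch of that URL construction (endpoint path and values are assumptions, shown only to make the filtering explicit):

    # Assumed mapping from the params dict to the os-services query string.
    from urllib import urlencode   # Python 2, matching the era of this code

    params = {'host': 'devstack-host', 'binary': 'cinder-volume'}
    url = 'os-services?' + urlencode(params)
    # -> 'os-services?host=devstack-host&binary=cinder-volume'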
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index ee1d09a..3b8c214 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -118,14 +118,16 @@
'from the created Volume_type')
@test.attr(type='smoke')
- def test_volume_type_encryption_create_get(self):
- # Create/get encryption type.
+ def test_volume_type_encryption_create_get_delete(self):
+ # Create/get/delete encryption type.
provider = "LuksEncryptor"
control_location = "front-end"
name = data_utils.rand_name("volume-type-")
resp, body = self.client.create_volume_type(name)
self.assertEqual(200, resp.status)
self.addCleanup(self._delete_volume_type, body['id'])
+
+ # Create encryption type
resp, encryption_type = self.client.create_encryption_type(
body['id'], provider=provider,
control_location=control_location)
@@ -137,6 +139,8 @@
self.assertEqual(control_location, encryption_type['control_location'],
"The created encryption_type control_location is not "
"equal to the requested control_location")
+
+ # Get encryption type
resp, fetched_encryption_type = self.client.get_encryption_type(
encryption_type['volume_type_id'])
self.assertEqual(200, resp.status)
@@ -148,3 +152,15 @@
fetched_encryption_type['control_location'],
'The fetched encryption_type control_location is '
'different from the created encryption_type')
+
+ # Delete encryption type
+ resp, _ = self.client.delete_encryption_type(
+ encryption_type['volume_type_id'])
+ self.assertEqual(202, resp.status)
+ resource = {"id": encryption_type['volume_type_id'],
+ "type": "encryption-type"}
+ self.client.wait_for_resource_deletion(resource)
+ resp, deleted_encryption_type = self.client.get_encryption_type(
+ encryption_type['volume_type_id'])
+ self.assertEqual(200, resp.status)
+ self.assertEmpty(deleted_encryption_type)
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 4496f18..008f739 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -85,24 +85,6 @@
self.volume['id'])
self.assertEqual('error', volume_get['status'])
- @test.attr(type='gate')
- def test_volume_begin_detaching(self):
- # test volume begin detaching : available -> detaching -> available
- resp, body = self.client.volume_begin_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp_get, volume_get = self.client.get_volume(self.volume['id'])
- self.assertEqual('detaching', volume_get['status'])
-
- @test.attr(type='gate')
- def test_volume_roll_detaching(self):
- # test volume roll detaching : detaching -> in-use -> available
- resp, body = self.client.volume_begin_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp, body = self.client.volume_roll_detaching(self.volume['id'])
- self.assertEqual(202, resp.status)
- resp_get, volume_get = self.client.get_volume(self.volume['id'])
- self.assertEqual('in-use', volume_get['status'])
-
def test_volume_force_delete_when_volume_is_creating(self):
# test force delete when status of volume is creating
self._create_reset_and_force_delete_temp_volume('creating')
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 2c6050c..67d0203 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -107,7 +107,9 @@
cls.snapshots_client = cls.os.snapshots_client
cls.volumes_client = cls.os.volumes_client
cls.backups_client = cls.os.backups_client
+ cls.volume_services_client = cls.os.volume_services_client
cls.volumes_extension_client = cls.os.volumes_extension_client
+ cls.availability_zone_client = cls.os.volume_availability_zone_client
@classmethod
def create_volume(cls, size=1, **kwargs):
@@ -135,11 +137,7 @@
"in configuration.")
raise cls.skipException(msg)
if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = creds
- cls.os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
+ cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
interface=cls._interface)
else:
cls.os_adm = clients.AdminManager(interface=cls._interface)
diff --git a/tempest/api/compute/v3/servers/test_availability_zone.py b/tempest/api/volume/test_availability_zone.py
similarity index 76%
rename from tempest/api/compute/v3/servers/test_availability_zone.py
rename to tempest/api/volume/test_availability_zone.py
index 5a1e07e..1db7b7b 100644
--- a/tempest/api/compute/v3/servers/test_availability_zone.py
+++ b/tempest/api/volume/test_availability_zone.py
@@ -13,11 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.compute import base
+from tempest.api.volume import base
from tempest import test
-class AZV3Test(base.BaseV3ComputeTest):
+class AvailabilityZoneTestJSON(base.BaseVolumeV1Test):
"""
Tests Availability Zone API List
@@ -25,12 +25,16 @@
@classmethod
def setUpClass(cls):
- super(AZV3Test, cls).setUpClass()
+ super(AvailabilityZoneTestJSON, cls).setUpClass()
cls.client = cls.availability_zone_client
@test.attr(type='gate')
- def test_get_availability_zone_list_with_non_admin_user(self):
- # List of availability zone with non-administrator user
+ def test_get_availability_zone_list(self):
+ # List of availability zone
resp, availability_zone = self.client.get_availability_zone_list()
self.assertEqual(200, resp.status)
self.assertTrue(len(availability_zone) > 0)
+
+
+class AvailabilityZoneTestXML(AvailabilityZoneTestJSON):
+ _interface = 'xml'
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 55a72c1..82d1364 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -32,32 +32,18 @@
# Add another tenant to test volume-transfer
if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_alt_creds()
- username, tenant_name, password = creds
- cls.os_alt = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
+ cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds(),
interface=cls._interface)
- cls.alt_tenant_id = cls.isolated_creds.get_alt_tenant()['id']
-
# Add admin tenant to cleanup resources
- adm_creds = cls.isolated_creds.get_admin_creds()
- admin_username, admin_tenant_name, admin_password = adm_creds
- cls.os_adm = clients.Manager(username=admin_username,
- password=admin_password,
- tenant_name=admin_tenant_name,
+ cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
interface=cls._interface)
else:
cls.os_alt = clients.AltManager()
- alt_tenant_name = cls.os_alt.credentials['tenant_name']
- identity_client = cls._get_identity_admin_client()
- _, tenants = identity_client.list_tenants()
- cls.alt_tenant_id = [tnt['id'] for tnt in tenants
- if tnt['name'] == alt_tenant_name][0]
cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
cls.client = cls.volumes_client
cls.alt_client = cls.os_alt.volumes_client
+ cls.alt_tenant_id = cls.alt_client.tenant_id
cls.adm_client = cls.os_adm.volumes_client
def _delete_volume(self, volume_id):
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index a8b0a8d..bc5b1dc 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -154,6 +154,7 @@
self.assertRaises(exceptions.NotFound, self.client.delete_volume, '')
@test.attr(type=['negative', 'gate'])
+ @test.services('compute')
def test_attach_volumes_with_nonexistent_volume_id(self):
srv_name = data_utils.rand_name('Instance-')
resp, server = self.servers_client.create_server(srv_name,
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 6294cd9..26316d2 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -63,6 +63,7 @@
self.assertEqual(params[key], snap[key], msg)
@test.attr(type='gate')
+ @test.services('compute')
def test_snapshot_create_with_volume_in_use(self):
# Create a snapshot when volume status is in-use
# Create a test instance
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index 41445d7..e90c957 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -203,7 +203,7 @@
def _list_details_with_multiple_params(limit=2,
status='available',
sort_dir='asc',
- sort_key='created_at'):
+ sort_key='id'):
params = {'limit': limit,
'status': status,
'sort_dir': sort_dir,
diff --git a/tempest/api_schema/compute/agents.py b/tempest/api_schema/compute/agents.py
new file mode 100644
index 0000000..b04cf64
--- /dev/null
+++ b/tempest/api_schema/compute/agents.py
@@ -0,0 +1,40 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_agents = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'agents': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'agent_id': {'type': ['integer', 'string']},
+ 'hypervisor': {'type': 'string'},
+ 'os': {'type': 'string'},
+ 'architecture': {'type': 'string'},
+ 'version': {'type': 'string'},
+ 'url': {'type': 'string', 'format': 'uri'},
+ 'md5hash': {'type': 'string'}
+ },
+ 'required': ['agent_id', 'hypervisor', 'os',
+ 'architecture', 'version', 'url', 'md5hash']
+ }
+ }
+ },
+ 'required': ['agents']
+ }
+}
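[Editor's note, not part of the patch] These api_schema modules pair a list of allowed status codes with a JSON Schema for the response body; they are presumably consumed by the REST client along the lines of the sketch below (the function name and wiring are illustrative; only jsonschema.validate is a real library call):

    # Minimal sketch of how a schema dict like list_agents could be applied.
    import jsonschema

    def validate_response(schema, status, body):
        # the response status must be one of the whitelisted codes
        assert status in schema['status_code'], 'unexpected status %s' % status
        # the decoded JSON body must match the declared structure
        if 'response_body' in schema:
            jsonschema.validate(body, schema['response_body'])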
diff --git a/tempest/api_schema/compute/aggregates.py b/tempest/api_schema/compute/aggregates.py
index a3ab3c8..9393a16 100644
--- a/tempest/api_schema/compute/aggregates.py
+++ b/tempest/api_schema/compute/aggregates.py
@@ -64,3 +64,23 @@
'updated_at'] = {
'type': 'string'
}
+
+common_create_aggregate = {
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'aggregate': aggregate
+ },
+ 'required': ['aggregate']
+ }
+}
+# create-aggregate api doesn't have 'hosts' and 'metadata' attributes.
+del common_create_aggregate['response_body']['properties']['aggregate'][
+ 'properties']['hosts']
+del common_create_aggregate['response_body']['properties']['aggregate'][
+ 'properties']['metadata']
+common_create_aggregate['response_body']['properties']['aggregate'][
+ 'required'] = ['availability_zone', 'created_at', 'deleted', 'deleted_at',
+ 'id', 'name', 'updated_at']
+
+aggregate_add_remove_host = get_aggregate
diff --git a/tempest/api_schema/compute/certificates.py b/tempest/api_schema/compute/certificates.py
new file mode 100644
index 0000000..caac2ab
--- /dev/null
+++ b/tempest/api_schema/compute/certificates.py
@@ -0,0 +1,37 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+_common_schema = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'certificate': {
+ 'type': 'object',
+ 'properties': {
+ 'data': {'type': 'string'},
+ 'private_key': {'type': 'string'},
+ },
+ 'required': ['data', 'private_key'],
+ }
+ },
+ 'required': ['certificate'],
+ }
+}
+
+get_certificate = copy.deepcopy(_common_schema)
+get_certificate['response_body']['properties']['certificate'][
+ 'properties']['private_key'].update({'type': 'null'})
diff --git a/tempest/api_schema/compute/flavors.py b/tempest/api_schema/compute/flavors.py
index fd02780..aa019e4 100644
--- a/tempest/api_schema/compute/flavors.py
+++ b/tempest/api_schema/compute/flavors.py
@@ -36,6 +36,21 @@
}
}
+common_flavor_info = {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'ram': {'type': 'integer'},
+ 'vcpus': {'type': 'integer'},
+ 'swap': {'type': 'integer'},
+ 'disk': {'type': 'integer'},
+ 'id': {'type': 'string'}
+ },
+ 'required': ['name', 'links', 'ram', 'vcpus',
+ 'swap', 'disk', 'id']
+}
+
common_flavor_list_details = {
'status_code': [200],
'response_body': {
@@ -43,22 +58,20 @@
'properties': {
'flavors': {
'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'name': {'type': 'string'},
- 'links': parameter_types.links,
- 'ram': {'type': 'integer'},
- 'vcpus': {'type': 'integer'},
- 'swap': {'type': 'integer'},
- 'disk': {'type': 'integer'},
- 'id': {'type': 'string'}
- },
- 'required': ['name', 'links', 'ram', 'vcpus',
- 'swap', 'disk', 'id']
- }
+ 'items': common_flavor_info
}
},
'required': ['flavors']
}
}
+
+common_flavor_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'flavor': common_flavor_info
+ },
+ 'required': ['flavor']
+ }
+}
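[Editor's note, not part of the patch] common_flavor_info and common_flavor_details are shared building blocks; the per-API-version schema modules presumably derive their concrete schemas from them by deep-copying and then adjusting version-specific fields, roughly as below (the derived name and the extension attribute are illustrative, not part of this change):

    # Illustrative derivation of a version-specific schema from a common one.
    import copy

    from tempest.api_schema.compute import flavors

    v2_get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
    # a hypothetical v2-only extension attribute could be added like this:
    v2_get_flavor_details['response_body']['properties']['flavor'][
        'properties']['OS-FLV-DISABLED:disabled'] = {'type': 'boolean'}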
diff --git a/tempest/api_schema/compute/hosts.py b/tempest/api_schema/compute/hosts.py
index a73e214..2596c27 100644
--- a/tempest/api_schema/compute/hosts.py
+++ b/tempest/api_schema/compute/hosts.py
@@ -12,6 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+common_start_up_body = {
+ 'type': 'object',
+ 'properties': {
+ 'host': {'type': 'string'},
+ 'power_action': {'enum': ['startup']}
+ },
+ 'required': ['host', 'power_action']
+}
+
list_hosts = {
'status_code': [200],
'response_body': {
@@ -64,3 +73,13 @@
'required': ['host']
}
}
+
+update_host_common = {
+ 'type': 'object',
+ 'properties': {
+ 'host': {'type': 'string'},
+ 'maintenance_mode': {'enum': ['on_maintenance', 'off_maintenance']},
+ 'status': {'enum': ['enabled', 'disabled']}
+ },
+ 'required': ['host', 'maintenance_mode', 'status']
+}
diff --git a/tempest/api_schema/compute/interfaces.py b/tempest/api_schema/compute/interfaces.py
index 1e15c18..79a8f42 100644
--- a/tempest/api_schema/compute/interfaces.py
+++ b/tempest/api_schema/compute/interfaces.py
@@ -12,6 +12,36 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.api_schema.compute import parameter_types
+
delete_interface = {
'status_code': [202]
}
+
+interface_common_info = {
+ 'type': 'object',
+ 'properties': {
+ 'port_state': {'type': 'string'},
+ 'fixed_ips': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'subnet_id': {
+ 'type': 'string',
+ 'format': 'uuid'
+ },
+ 'ip_address': {
+ 'type': 'string',
+ 'format': 'ipv4'
+ }
+ },
+ 'required': ['subnet_id', 'ip_address']
+ }
+ },
+ 'port_id': {'type': 'string', 'format': 'uuid'},
+ 'net_id': {'type': 'string', 'format': 'uuid'},
+ 'mac_addr': parameter_types.mac_address
+ },
+ 'required': ['port_state', 'fixed_ips', 'port_id', 'net_id', 'mac_addr']
+}
diff --git a/tempest/api_schema/compute/migrations.py b/tempest/api_schema/compute/migrations.py
new file mode 100644
index 0000000..6723869
--- /dev/null
+++ b/tempest/api_schema/compute/migrations.py
@@ -0,0 +1,56 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+list_migrations = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'migrations': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: The type of 'id' is currently integer, but 'string'
+ # is also allowed here because it may be changed to a
+ # uuid in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'status': {'type': 'string'},
+ 'instance_uuid': {'type': 'string'},
+ 'source_node': {'type': 'string'},
+ 'source_compute': {'type': 'string'},
+ 'dest_node': {'type': 'string'},
+ 'dest_compute': {'type': 'string'},
+ 'dest_host': {'type': 'string'},
+ 'old_instance_type_id': {
+ 'type': ['integer', 'string']
+ },
+ 'new_instance_type_id': {
+ 'type': ['integer', 'string']
+ },
+ 'created_at': {'type': 'string'},
+ 'updated_at': {'type': ['string', 'null']}
+ },
+ 'required': [
+ 'id', 'status', 'instance_uuid', 'source_node',
+ 'source_compute', 'dest_node', 'dest_compute',
+ 'dest_host', 'old_instance_type_id',
+ 'new_instance_type_id', 'created_at', 'updated_at'
+ ]
+ }
+ }
+ },
+ 'required': ['migrations']
+ }
+}
diff --git a/tempest/api_schema/compute/parameter_types.py b/tempest/api_schema/compute/parameter_types.py
index 95d5b92..4a1dfdd 100644
--- a/tempest/api_schema/compute/parameter_types.py
+++ b/tempest/api_schema/compute/parameter_types.py
@@ -31,3 +31,37 @@
'type': 'string',
'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
}
+
+access_ip_v4 = {
+ 'type': 'string',
+ 'anyOf': [{'format': 'ipv4'}, {'enum': ['']}]
+}
+
+access_ip_v6 = {
+ 'type': 'string',
+ 'anyOf': [{'format': 'ipv6'}, {'enum': ['']}]
+}
+
+addresses = {
+ 'type': 'object',
+ 'patternProperties': {
+ # NOTE: Network names such as 'private' are arbitrary, so accept any key.
+ '^[a-zA-Z0-9-_.]+$': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'version': {'type': 'integer'},
+ 'addr': {
+ 'type': 'string',
+ 'anyOf': [
+ {'format': 'ipv4'},
+ {'format': 'ipv6'}
+ ]
+ }
+ },
+ 'required': ['version', 'addr']
+ }
+ }
+ }
+}
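[Editor's note, not part of the patch] As a concrete illustration of the patternProperties rule above, an 'addresses' value of the following shape would validate (the network name and addresses are made up):

    # Example payload accepted by the 'addresses' parameter type above.
    example_addresses = {
        'private': [                 # any network name matching ^[a-zA-Z0-9-_.]+$
            {'version': 4, 'addr': '10.0.0.3'},
            {'version': 6, 'addr': 'fd00::3'},
        ],
    }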
diff --git a/tempest/api_schema/compute/quotas.py b/tempest/api_schema/compute/quotas.py
new file mode 100644
index 0000000..f49771e
--- /dev/null
+++ b/tempest/api_schema/compute/quotas.py
@@ -0,0 +1,41 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+common_quota_set = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'quota_set': {
+ 'type': 'object',
+ 'properties': {
+ 'instances': {'type': 'integer'},
+ 'cores': {'type': 'integer'},
+ 'ram': {'type': 'integer'},
+ 'floating_ips': {'type': 'integer'},
+ 'fixed_ips': {'type': 'integer'},
+ 'metadata_items': {'type': 'integer'},
+ 'key_pairs': {'type': 'integer'},
+ 'security_groups': {'type': 'integer'},
+ 'security_group_rules': {'type': 'integer'}
+ },
+ 'required': ['instances', 'cores', 'ram',
+ 'floating_ips', 'fixed_ips',
+ 'metadata_items', 'key_pairs',
+ 'security_groups', 'security_group_rules']
+ }
+ },
+ 'required': ['quota_set']
+ }
+}
diff --git a/tempest/api_schema/compute/servers.py b/tempest/api_schema/compute/servers.py
index a273abb..4e8c201 100644
--- a/tempest/api_schema/compute/servers.py
+++ b/tempest/api_schema/compute/servers.py
@@ -12,6 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
+from tempest.api_schema.compute import parameter_types
+
get_password = {
'status_code': [200],
'response_body': {
@@ -44,6 +48,119 @@
}
}
+base_update_server = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'server': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'name': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'image': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'flavor': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'user_id': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'created': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'progress': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'links': parameter_types.links,
+ 'addresses': parameter_types.addresses,
+ },
+ # NOTE(GMann): The 'progress' attribute is present in the response
+ # only when the server's status is one of the progress statuses
+ # ("ACTIVE", "BUILD", "REBUILD", "RESIZE", "VERIFY_RESIZE"),
+ # so it is not defined as 'required'.
+ 'required': ['id', 'name', 'status', 'image', 'flavor',
+ 'user_id', 'tenant_id', 'created', 'updated',
+ 'metadata', 'links', 'addresses']
+ }
+ }
+ }
+}
+
delete_server = {
'status_code': [204],
}
+
+set_server_metadata = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'metadata': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+ }
+ },
+ 'required': ['metadata']
+ }
+}
+
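+# The list and update metadata responses have the same body structure as the
+# 'set' response, so they reuse that schema via deepcopy.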
+list_server_metadata = copy.deepcopy(set_server_metadata)
+
+update_server_metadata = copy.deepcopy(set_server_metadata)
+
+delete_server_metadata_item = {
+ 'status_code': [204]
+}
+
+list_servers = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'servers': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'name': {'type': 'string'}
+ },
+ 'required': ['id', 'links', 'name']
+ }
+ }
+ },
+ 'required': ['servers']
+ }
+}
+
+server_actions_common_schema = {
+ 'status_code': [202]
+}
+
+server_actions_delete_password = {
+ 'status_code': [204]
+}
+
+get_console_output = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'output': {'type': 'string'}
+ },
+ 'required': ['output']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/aggregates.py b/tempest/api_schema/compute/v2/aggregates.py
new file mode 100644
index 0000000..bc36044
--- /dev/null
+++ b/tempest/api_schema/compute/v2/aggregates.py
@@ -0,0 +1,25 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import aggregates
+
+delete_aggregate = {
+ 'status_code': [200]
+}
+
+create_aggregate = copy.deepcopy(aggregates.common_create_aggregate)
+# V2 API's response status_code is 200
+create_aggregate['status_code'] = [200]
diff --git a/tempest/api_schema/compute/v2/certificates.py b/tempest/api_schema/compute/v2/certificates.py
new file mode 100644
index 0000000..1eb38ce
--- /dev/null
+++ b/tempest/api_schema/compute/v2/certificates.py
@@ -0,0 +1,19 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
diff --git a/tempest/api_schema/compute/v2/flavors.py b/tempest/api_schema/compute/v2/flavors.py
index 48e6ceb..bee6ecb 100644
--- a/tempest/api_schema/compute/v2/flavors.py
+++ b/tempest/api_schema/compute/v2/flavors.py
@@ -35,3 +35,23 @@
unset_flavor_extra_specs = {
'status_code': [200]
}
+
+create_get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# The 'swap' attribute comes as an integer value, but when it is empty it
+# comes as "". So its type is defined as both string and integer.
+create_get_flavor_details['response_body']['properties']['flavor'][
+ 'properties']['swap'] = {'type': ['string', 'integer']}
+
+# Defining extra attributes for V2 flavor schema
+create_get_flavor_details['response_body']['properties']['flavor'][
+ 'properties'].update({'OS-FLV-DISABLED:disabled': {'type': 'boolean'},
+ 'os-flavor-access:is_public': {'type': 'boolean'},
+ 'rxtx_factor': {'type': 'number'},
+ 'OS-FLV-EXT-DATA:ephemeral': {'type': 'integer'}})
+# 'OS-FLV-DISABLED', 'os-flavor-access', 'rxtx_factor' and 'OS-FLV-EXT-DATA'
+# are API extensions. So they are not 'required'.
+
+delete_flavor = {
+ 'status_code': [202]
+}
diff --git a/tempest/api_schema/compute/v2/hosts.py b/tempest/api_schema/compute/v2/hosts.py
index cd6bd7b..86efadf 100644
--- a/tempest/api_schema/compute/v2/hosts.py
+++ b/tempest/api_schema/compute/v2/hosts.py
@@ -14,18 +14,12 @@
import copy
-body = {
- 'type': 'object',
- 'properties': {
- 'host': {'type': 'string'},
- 'power_action': {'enum': ['startup']}
- },
- 'required': ['host', 'power_action']
-}
+from tempest.api_schema.compute import hosts
+
startup_host = {
'status_code': [200],
- 'response_body': body
+ 'response_body': hosts.common_start_up_body
}
# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
@@ -41,3 +35,8 @@
reboot_host['response_body']['properties']['power_action'] = {
'enum': ['reboot']
}
+
+update_host = {
+ 'status_code': [200],
+ 'response_body': hosts.update_host_common
+}
diff --git a/tempest/api_schema/compute/v2/images.py b/tempest/api_schema/compute/v2/images.py
index fad6b56..d121060 100644
--- a/tempest/api_schema/compute/v2/images.py
+++ b/tempest/api_schema/compute/v2/images.py
@@ -14,43 +14,46 @@
from tempest.api_schema.compute import parameter_types
+common_image_schema = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': 'string'},
+ 'status': {'type': 'string'},
+ 'updated': {'type': 'string'},
+ 'links': parameter_types.links,
+ 'name': {'type': 'string'},
+ 'created': {'type': 'string'},
+ 'minDisk': {'type': 'integer'},
+ 'minRam': {'type': 'integer'},
+ 'progress': {'type': 'integer'},
+ 'metadata': {'type': 'object'},
+ 'server': {
+ 'type': 'object',
+ 'properties': {
+ # NOTE: The type of 'id' is currently integer, but 'string' is also
+ # allowed here because it may be changed to a uuid in the future.
+ 'id': {'type': ['integer', 'string']},
+ 'links': parameter_types.links
+ },
+ 'required': ['id', 'links']
+ },
+ 'OS-EXT-IMG-SIZE:size': {'type': 'integer'}
+ },
+ # The 'server' attribute only comes in the response body if the image is
+ # associated with a server, and 'OS-EXT-IMG-SIZE:size' is an API
+ # extension, so neither is defined as 'required'.
+ 'required': ['id', 'status', 'updated', 'links', 'name',
+ 'created', 'minDisk', 'minRam', 'progress',
+ 'metadata']
+}
+
get_image = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
- 'image': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'status': {'type': 'string'},
- 'updated': {'type': 'string'},
- 'links': parameter_types.links,
- 'name': {'type': 'string'},
- 'created': {'type': 'string'},
- 'OS-EXT-IMG-SIZE:size': {'type': 'integer'},
- 'minDisk': {'type': 'integer'},
- 'minRam': {'type': 'integer'},
- 'progress': {'type': 'integer'},
- 'metadata': {'type': 'object'},
- 'server': {
- 'type': 'object',
- 'properties': {
- # NOTE: Now the type of 'id' is integer, but here
- # allows 'string' also because we will be able to
- # change it to 'uuid' in the future.
- 'id': {'type': ['integer', 'string']},
- 'links': parameter_types.links
- },
- 'required': ['id', 'links']
- }
- },
- # 'server' attributes only comes in response body if image is
- # associated with any server. So it is not 'required'
- 'required': ['id', 'status', 'updated', 'links', 'name',
- 'created', 'OS-EXT-IMG-SIZE:size', 'minDisk',
- 'minRam', 'progress', 'metadata']
- }
+ 'image': common_image_schema
},
'required': ['image']
}
@@ -67,20 +70,7 @@
'type': 'object',
'properties': {
'id': {'type': 'string'},
- 'links': {
- 'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'href': {
- 'type': 'string',
- 'format': 'uri'
- },
- 'rel': {'type': 'string'}
- },
- 'required': ['href', 'rel']
- }
- },
+ 'links': parameter_types.links,
'name': {'type': 'string'}
},
'required': ['id', 'links', 'name']
@@ -92,7 +82,17 @@
}
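+# NOTE: The image created by this API is referenced by the 'location'
+# response header rather than by a response body, so only the header is
+# validated for create_image.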
create_image = {
- 'status_code': [202]
+ 'status_code': [202],
+ 'response_header': {
+ 'type': 'object',
+ 'properties': {
+ 'location': {
+ 'type': 'string',
+ 'format': 'uri'
+ }
+ },
+ 'required': ['location']
+ }
}
delete = {
@@ -120,3 +120,17 @@
'required': ['meta']
}
}
+
+list_images_details = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'images': {
+ 'type': 'array',
+ 'items': common_image_schema
+ }
+ },
+ 'required': ['images']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/interfaces.py b/tempest/api_schema/compute/v2/interfaces.py
new file mode 100644
index 0000000..7fca791
--- /dev/null
+++ b/tempest/api_schema/compute/v2/interfaces.py
@@ -0,0 +1,29 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api_schema.compute import interfaces as common_schema
+
+list_interfaces = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'interfaceAttachments': {
+ 'type': 'array',
+ 'items': common_schema.interface_common_info
+ }
+ },
+ 'required': ['interfaceAttachments']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/quotas.py b/tempest/api_schema/compute/v2/quotas.py
index 17dc4dd..31c0458 100644
--- a/tempest/api_schema/compute/v2/quotas.py
+++ b/tempest/api_schema/compute/v2/quotas.py
@@ -12,39 +12,36 @@
# License for the specific language governing permissions and limitations
# under the License.
-quota_set = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'quota_set': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'instances': {'type': 'integer'},
- 'cores': {'type': 'integer'},
- 'ram': {'type': 'integer'},
- 'floating_ips': {'type': 'integer'},
- 'fixed_ips': {'type': 'integer'},
- 'metadata_items': {'type': 'integer'},
- 'injected_files': {'type': 'integer'},
- 'injected_file_content_bytes': {'type': 'integer'},
- 'injected_file_path_bytes': {'type': 'integer'},
- 'key_pairs': {'type': 'integer'},
- 'security_groups': {'type': 'integer'},
- 'security_group_rules': {'type': 'integer'}
- },
- 'required': ['id', 'instances', 'cores', 'ram',
- 'floating_ips', 'fixed_ips',
- 'metadata_items', 'injected_files',
- 'injected_file_content_bytes',
- 'injected_file_path_bytes', 'key_pairs',
- 'security_groups', 'security_group_rules']
- }
- },
- 'required': ['quota_set']
- }
-}
+import copy
+
+from tempest.api_schema.compute import quotas
+
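+# V2 extends the common quota schema with 'id' and the injected-file quotas.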
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'injected_files'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set['response_body']['properties']['quota_set']['required'].extend([
+ 'id',
+ 'injected_files',
+ 'injected_file_content_bytes',
+ 'injected_file_path_bytes'])
+
+quota_set_update = copy.deepcopy(quotas.common_quota_set)
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+ 'injected_files'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+ 'injected_file_content_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set']['properties'][
+ 'injected_file_path_bytes'] = {'type': 'integer'}
+quota_set_update['response_body']['properties']['quota_set'][
+ 'required'].extend(['injected_files',
+ 'injected_file_content_bytes',
+ 'injected_file_path_bytes'])
delete_quota = {
'status_code': [202]
diff --git a/tempest/api_schema/compute/v2/security_groups.py b/tempest/api_schema/compute/v2/security_groups.py
index 6dd44cd..9a852e5 100644
--- a/tempest/api_schema/compute/v2/security_groups.py
+++ b/tempest/api_schema/compute/v2/security_groups.py
@@ -12,6 +12,49 @@
# License for the specific language governing permissions and limitations
# under the License.
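+# NOTE: Only the 'properties' part of the rule schema is defined here so that
+# it can be embedded both in the 'rules' list of a security group and in the
+# security group rule response below.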
+common_security_group_rule = {
+ 'from_port': {'type': ['integer', 'null']},
+ 'to_port': {'type': ['integer', 'null']},
+ 'group': {
+ 'type': 'object',
+ 'properties': {
+ 'tenant_id': {'type': 'string'},
+ 'name': {'type': 'string'}
+ }
+ },
+ 'ip_protocol': {'type': ['string', 'null']},
+ # 'parent_group_id' can be a UUID, so 'string' is also allowed.
+ 'parent_group_id': {'type': ['string', 'integer', 'null']},
+ 'ip_range': {
+ 'type': 'object',
+ 'properties': {
+ 'cidr': {'type': 'string'}
+ }
+ # When an optional argument such as 'group_id' is provided in the
+ # request body, the 'cidr' attribute does not come back in the
+ # response body, so it is not 'required'.
+ },
+ 'id': {'type': ['string', 'integer']}
+}
+
+common_security_group = {
+ 'type': 'object',
+ 'properties': {
+ 'id': {'type': ['integer', 'string']},
+ 'name': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'rules': {
+ 'type': 'array',
+ 'items': {
+ 'type': ['object', 'null'],
+ 'properties': common_security_group_rule
+ }
+ },
+ 'description': {'type': 'string'},
+ },
+ 'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
+}
+
list_security_groups = {
'status_code': [200],
'response_body': {
@@ -19,24 +62,28 @@
'properties': {
'security_groups': {
'type': 'array',
- 'items': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': ['integer', 'string']},
- 'name': {'type': 'string'},
- 'tenant_id': {'type': 'string'},
- 'rules': {'type': 'array'},
- 'description': {'type': 'string'},
- },
- 'required': ['id', 'name', 'tenant_id', 'rules',
- 'description'],
- }
+ 'items': common_security_group
}
},
'required': ['security_groups']
}
}
+get_security_group = create_security_group = update_security_group = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'security_group': common_security_group
+ },
+ 'required': ['security_group']
+ }
+}
+
+delete_security_group = {
+ 'status_code': [202]
+}
+
create_security_group_rule = {
'status_code': [200],
'response_body': {
@@ -44,25 +91,7 @@
'properties': {
'security_group_rule': {
'type': 'object',
- 'properties': {
- 'from_port': {'type': 'integer'},
- 'to_port': {'type': 'integer'},
- 'group': {'type': 'object'},
- 'ip_protocol': {'type': 'string'},
- # 'parent_group_id' can be UUID so defining it
- # as 'string' also.
- 'parent_group_id': {'type': ['integer', 'string']},
- 'id': {'type': ['integer', 'string']},
- 'ip_range': {
- 'type': 'object',
- 'properties': {
- 'cidr': {'type': 'string'}
- }
- # When optional argument is provided in request body
- # like 'group_id' then, attribute 'cidr' does not
- # comes in response body. So it is not 'required'.
- }
- },
+ 'properties': common_security_group_rule,
'required': ['from_port', 'to_port', 'group', 'ip_protocol',
'parent_group_id', 'id', 'ip_range']
}
diff --git a/tempest/api_schema/compute/v2/servers.py b/tempest/api_schema/compute/v2/servers.py
index eed4589..981d8f7 100644
--- a/tempest/api_schema/compute/v2/servers.py
+++ b/tempest/api_schema/compute/v2/servers.py
@@ -12,7 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
from tempest.api_schema.compute import parameter_types
+from tempest.api_schema.compute import servers
create_server = {
'status_code': [202],
@@ -43,6 +46,20 @@
}
}
+update_server = copy.deepcopy(servers.base_update_server)
+update_server['response_body']['properties']['server']['properties'].update({
+ 'hostId': {'type': 'string'},
+ 'OS-DCF:diskConfig': {'type': 'string'},
+ 'accessIPv4': parameter_types.access_ip_v4,
+ 'accessIPv6': parameter_types.access_ip_v6
+})
+update_server['response_body']['properties']['server']['required'].append(
+ # NOTE: OS-DCF:diskConfig and accessIPv4/v6 are API
+ # extensions, and some environments return a response
+ # without these attributes. So they are not 'required'.
+ 'hostId'
+)
+
list_virtual_interfaces = {
'status_code': [200],
'response_body': {
@@ -90,3 +107,38 @@
detach_volume = {
'status_code': [202]
}
+
+set_get_server_metadata_item = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'meta': {
+ 'type': 'object',
+ 'patternProperties': {
+ '^.+$': {'type': 'string'}
+ }
+ }
+ },
+ 'required': ['meta']
+ }
+}
+
+list_addresses_by_network = {
+ 'status_code': [200],
+ 'response_body': parameter_types.addresses
+}
+
+server_actions_confirm_resize = copy.deepcopy(
+ servers.server_actions_delete_password)
+
+list_addresses = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'addresses': parameter_types.addresses
+ },
+ 'required': ['addresses']
+ }
+}
diff --git a/tempest/api_schema/compute/v2/tenant_usages.py b/tempest/api_schema/compute/v2/tenant_usages.py
new file mode 100644
index 0000000..0b824a1
--- /dev/null
+++ b/tempest/api_schema/compute/v2/tenant_usages.py
@@ -0,0 +1,92 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
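+# 'ended_at' is null for instances that are still running, hence the oneOf
+# with 'null' below.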
+_server_usages = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'ended_at': {
+ 'oneOf': [
+ {'type': 'string'},
+ {'type': 'null'}
+ ]
+ },
+ 'flavor': {'type': 'string'},
+ 'hours': {'type': 'number'},
+ 'instance_id': {'type': 'string'},
+ 'local_gb': {'type': 'integer'},
+ 'memory_mb': {'type': 'integer'},
+ 'name': {'type': 'string'},
+ 'started_at': {'type': 'string'},
+ 'state': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'uptime': {'type': 'integer'},
+ 'vcpus': {'type': 'integer'},
+ },
+ 'required': ['ended_at', 'flavor', 'hours', 'instance_id', 'local_gb',
+ 'memory_mb', 'name', 'started_at', 'state', 'tenant_id',
+ 'uptime', 'vcpus']
+ }
+}
+
+_tenant_usage_list = {
+ 'type': 'object',
+ 'properties': {
+ 'server_usages': _server_usages,
+ 'start': {'type': 'string'},
+ 'stop': {'type': 'string'},
+ 'tenant_id': {'type': 'string'},
+ 'total_hours': {'type': 'number'},
+ 'total_local_gb_usage': {'type': 'number'},
+ 'total_memory_mb_usage': {'type': 'number'},
+ 'total_vcpus_usage': {'type': 'number'},
+ },
+ 'required': ['start', 'stop', 'tenant_id',
+ 'total_hours', 'total_local_gb_usage',
+ 'total_memory_mb_usage', 'total_vcpus_usage']
+}
+
+# 'required' of get_tenant is different from list_tenant's.
+_tenant_usage_get = copy.deepcopy(_tenant_usage_list)
+_tenant_usage_get['required'] = ['server_usages', 'start', 'stop', 'tenant_id',
+ 'total_hours', 'total_local_gb_usage',
+ 'total_memory_mb_usage', 'total_vcpus_usage']
+
+list_tenant = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'tenant_usages': {
+ 'type': 'array',
+ 'items': _tenant_usage_list
+ }
+ },
+ 'required': ['tenant_usages']
+ }
+}
+
+get_tenant = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'tenant_usage': _tenant_usage_get
+ },
+ 'required': ['tenant_usage']
+ }
+}
diff --git a/tempest/api_schema/compute/v3/aggregates.py b/tempest/api_schema/compute/v3/aggregates.py
new file mode 100644
index 0000000..0272641
--- /dev/null
+++ b/tempest/api_schema/compute/v3/aggregates.py
@@ -0,0 +1,29 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import aggregates
+
+delete_aggregate = {
+ 'status_code': [204]
+}
+
+create_aggregate = copy.deepcopy(aggregates.common_create_aggregate)
+# V3 API's response status_code is 201
+create_aggregate['status_code'] = [201]
+
+aggregate_add_remove_host = copy.deepcopy(aggregates.aggregate_add_remove_host)
+# V3 API's response status_code is 202
+aggregate_add_remove_host['status_code'] = [202]
diff --git a/tempest/api_schema/compute/v3/certificates.py b/tempest/api_schema/compute/v3/certificates.py
new file mode 100644
index 0000000..0723a16
--- /dev/null
+++ b/tempest/api_schema/compute/v3/certificates.py
@@ -0,0 +1,20 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.api_schema.compute import certificates
+
+create_certificate = copy.deepcopy(certificates._common_schema)
+create_certificate['status_code'] = [201]
diff --git a/tempest/api_schema/compute/v3/flavors.py b/tempest/api_schema/compute/v3/flavors.py
index 468658c..52010f5 100644
--- a/tempest/api_schema/compute/v3/flavors.py
+++ b/tempest/api_schema/compute/v3/flavors.py
@@ -39,3 +39,30 @@
unset_flavor_extra_specs = {
'status_code': [204]
}
+
+get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
+
+# NOTE: In the V3 API, 'swap' comes as '0' rather than an empty string '""'
+# (in the V2 API it comes as an empty string), so 'swap' is left as integer
+# type only.
+
+# Defining extra attributes for V3 flavor schema
+get_flavor_details['response_body']['properties']['flavor'][
+ 'properties'].update({'disabled': {'type': 'boolean'},
+ 'ephemeral': {'type': 'integer'},
+ 'flavor-access:is_public': {'type': 'boolean'},
+ 'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
+
+# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
+# So they are not 'required'.
+get_flavor_details['response_body']['properties']['flavor'][
+ 'required'].extend(['disabled', 'ephemeral'])
+
+
+create_flavor_details = copy.deepcopy(get_flavor_details)
+
+# Overriding the status code for create flavor V3 API.
+create_flavor_details['status_code'] = [201]
+
+delete_flavor = {
+ 'status_code': [204]
+}
diff --git a/tempest/api_schema/compute/v3/hosts.py b/tempest/api_schema/compute/v3/hosts.py
index 2cf8f9b..eb689d1 100644
--- a/tempest/api_schema/compute/v3/hosts.py
+++ b/tempest/api_schema/compute/v3/hosts.py
@@ -13,14 +13,15 @@
# under the License.
import copy
-from tempest.api_schema.compute.v2 import hosts
+
+from tempest.api_schema.compute import hosts
startup_host = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
- 'host': hosts.body
+ 'host': hosts.common_start_up_body
},
'required': ['host']
}
@@ -39,3 +40,14 @@
reboot_host['response_body']['properties']['power_action'] = {
'enum': ['reboot']
}
+
+update_host = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'host': hosts.update_host_common
+ },
+ 'required': ['host']
+ }
+}
diff --git a/tempest/api_schema/compute/v3/interfaces.py b/tempest/api_schema/compute/v3/interfaces.py
new file mode 100644
index 0000000..5e1cee2
--- /dev/null
+++ b/tempest/api_schema/compute/v3/interfaces.py
@@ -0,0 +1,29 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.api_schema.compute import interfaces as common_schema
+
+list_interfaces = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'interface_attachments': {
+ 'type': 'array',
+ 'items': common_schema.interface_common_info
+ }
+ },
+ 'required': ['interface_attachments']
+ }
+}
diff --git a/tempest/api_schema/compute/v3/quotas.py b/tempest/api_schema/compute/v3/quotas.py
index aec1e80..a3212ed 100644
--- a/tempest/api_schema/compute/v3/quotas.py
+++ b/tempest/api_schema/compute/v3/quotas.py
@@ -12,34 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-quota_set = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'quota_set': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'instances': {'type': 'integer'},
- 'cores': {'type': 'integer'},
- 'ram': {'type': 'integer'},
- 'floating_ips': {'type': 'integer'},
- 'fixed_ips': {'type': 'integer'},
- 'metadata_items': {'type': 'integer'},
- 'key_pairs': {'type': 'integer'},
- 'security_groups': {'type': 'integer'},
- 'security_group_rules': {'type': 'integer'}
- },
- 'required': ['id', 'instances', 'cores', 'ram',
- 'floating_ips', 'fixed_ips',
- 'metadata_items', 'key_pairs',
- 'security_groups', 'security_group_rules']
- }
- },
- 'required': ['quota_set']
- }
-}
+import copy
+
+from tempest.api_schema.compute import quotas
+
+quota_set = copy.deepcopy(quotas.common_quota_set)
+quota_set['response_body']['properties']['quota_set']['properties'][
+ 'id'] = {'type': 'string'}
+quota_set['response_body']['properties']['quota_set'][
+ 'required'].extend(['id'])
quota_common_info = {
'type': 'object',
@@ -51,34 +32,27 @@
'required': ['reserved', 'limit', 'in_use']
}
-quota_set_detail = {
- 'status_code': [200],
- 'response_body': {
- 'type': 'object',
- 'properties': {
- 'quota_set': {
- 'type': 'object',
- 'properties': {
- 'id': {'type': 'string'},
- 'instances': quota_common_info,
- 'cores': quota_common_info,
- 'ram': quota_common_info,
- 'floating_ips': quota_common_info,
- 'fixed_ips': quota_common_info,
- 'metadata_items': quota_common_info,
- 'key_pairs': quota_common_info,
- 'security_groups': quota_common_info,
- 'security_group_rules': quota_common_info
- },
- 'required': ['id', 'instances', 'cores', 'ram',
- 'floating_ips', 'fixed_ips',
- 'metadata_items', 'key_pairs',
- 'security_groups', 'security_group_rules']
- }
- },
- 'required': ['quota_set']
- }
-}
+quota_set_detail = copy.deepcopy(quotas.common_quota_set)
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'id'] = {'type': 'string'}
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'instances'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'cores'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'ram'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'floating_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'fixed_ips'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'metadata_items'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'key_pairs'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'security_groups'] = quota_common_info
+quota_set_detail['response_body']['properties']['quota_set']['properties'][
+ 'security_group_rules'] = quota_common_info
delete_quota = {
'status_code': [204]
diff --git a/tempest/api_schema/compute/v3/servers.py b/tempest/api_schema/compute/v3/servers.py
index f2a4b78..6716249 100644
--- a/tempest/api_schema/compute/v3/servers.py
+++ b/tempest/api_schema/compute/v3/servers.py
@@ -12,7 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
from tempest.api_schema.compute import parameter_types
+from tempest.api_schema.compute import servers
create_server = {
'status_code': [202],
@@ -29,8 +32,8 @@
'os-security-groups:security_groups': {'type': 'array'},
'links': parameter_types.links,
'admin_password': {'type': 'string'},
- 'os-access-ips:access_ip_v4': {'type': 'string'},
- 'os-access-ips:access_ip_v6': {'type': 'string'}
+ 'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+ 'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
},
# NOTE: os-access-ips:access_ip_v4/v6 are API extension,
# and some environments return a response without these
@@ -43,6 +46,56 @@
}
}
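+# V3 address entries additionally expose 'type' and 'mac_addr', so the common
+# addresses schema is extended here rather than redefined.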
+addresses_v3 = copy.deepcopy(parameter_types.addresses)
+addresses_v3['patternProperties']['^[a-zA-Z0-9-_.]+$']['items'][
+ 'properties'].update({
+ 'type': {'type': 'string'},
+ 'mac_addr': {'type': 'string'}
+ })
+addresses_v3['patternProperties']['^[a-zA-Z0-9-_.]+$']['items'][
+ 'required'].extend(
+ ['type', 'mac_addr']
+ )
+
+update_server = copy.deepcopy(servers.base_update_server)
+update_server['response_body']['properties']['server']['properties'].update({
+ 'addresses': addresses_v3,
+ 'host_id': {'type': 'string'},
+ 'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
+ 'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
+})
+update_server['response_body']['properties']['server']['required'].append(
+ # NOTE: os-access-ips:access_ip_v4/v6 are API extensions,
+ # and some environments return a response without these
+ # attributes. So they are not 'required'.
+ 'host_id'
+)
+
attach_detach_volume = {
'status_code': [202]
}
+
+set_get_server_metadata_item = copy.deepcopy(servers.set_server_metadata)
+
+list_addresses_by_network = {
+ 'status_code': [200],
+ 'response_body': addresses_v3
+}
+
+server_actions_change_password = copy.deepcopy(
+ servers.server_actions_delete_password)
+
+list_addresses = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'addresses': addresses_v3
+ },
+ 'required': ['addresses']
+ }
+}
+
+update_server_metadata = copy.deepcopy(servers.update_server_metadata)
+# V3 API's response status_code is 201
+update_server_metadata['status_code'] = [201]
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/api_schema/queuing/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/api_schema/queuing/__init__.py
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/api_schema/queuing/v1/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/api_schema/queuing/v1/__init__.py
diff --git a/tempest/api_schema/queuing/v1/queues.py b/tempest/api_schema/queuing/v1/queues.py
new file mode 100644
index 0000000..4630e1c
--- /dev/null
+++ b/tempest/api_schema/queuing/v1/queues.py
@@ -0,0 +1,98 @@
+
+# Copyright (c) 2014 Rackspace, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+list_link = {
+ 'type': 'object',
+ 'properties': {
+ 'rel': {'type': 'string'},
+ 'href': {
+ 'type': 'string',
+ 'format': 'uri'
+ }
+ },
+ 'required': ['href', 'rel']
+}
+
+list_queue = {
+ 'type': 'object',
+ 'properties': {
+ 'name': {'type': 'string'},
+ 'href': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'metadata': {'type': 'object'}
+ },
+ 'required': ['name', 'href']
+}
+
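+# NOTE: the list call may return 204 when there are no queues to list.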
+list_queues = {
+ 'status_code': [200, 204],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'links': {
+ 'type': 'array',
+ 'items': list_link,
+ 'maxItems': 1
+ },
+ 'queues': {
+ 'type': 'array',
+ 'items': list_queue
+ }
+ },
+ 'required': ['links', 'queues']
+ }
+}
+
+message_link = {
+ 'type': 'object',
+ 'properties': {
+ 'href': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'age': {'type': 'number'},
+ 'created': {
+ 'type': 'string',
+ 'format': 'date-time'
+ }
+ },
+ 'required': ['href', 'age', 'created']
+}
+
+messages = {
+ 'type': 'object',
+ 'properties': {
+ 'free': {'type': 'number'},
+ 'claimed': {'type': 'number'},
+ 'total': {'type': 'number'},
+ 'oldest': message_link,
+ 'newest': message_link
+ },
+ 'required': ['free', 'claimed', 'total']
+}
+
+queue_stats = {
+ 'status_code': [200],
+ 'response_body': {
+ 'type': 'object',
+ 'properties': {
+ 'messages': messages
+ },
+ 'required': ['messages']
+ }
+}
diff --git a/tempest/auth.py b/tempest/auth.py
index 5fc923f..9c51edb 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -43,11 +43,11 @@
:param client_type: 'tempest' or 'official'
:param interface: 'json' or 'xml'. Applicable for tempest client only
"""
+ credentials = self._convert_credentials(credentials)
if self.check_credentials(credentials):
self.credentials = credentials
else:
raise TypeError("Invalid credentials")
- self.credentials = credentials
self.client_type = client_type
self.interface = interface
if self.client_type == 'tempest' and self.interface is None:
@@ -56,6 +56,13 @@
self.alt_auth_data = None
self.alt_part = None
+ def _convert_credentials(self, credentials):
+ # Support dict credentials for backwards compatibility
+ if isinstance(credentials, dict):
+ return get_credentials(**credentials)
+ else:
+ return credentials
+
def __str__(self):
return "Creds :{creds}, client type: {client_type}, interface: " \
"{interface}, cached auth data: {cache}".format(
@@ -73,29 +80,55 @@
def _get_auth(self):
raise NotImplementedError
+ def _fill_credentials(self, auth_data_body):
+ raise NotImplementedError
+
+ def fill_credentials(self):
+ """
+ Fill credentials object with data from auth
+ """
+ auth_data = self.get_auth()
+ self._fill_credentials(auth_data[1])
+ return self.credentials
+
@classmethod
def check_credentials(cls, credentials):
"""
- Verify credentials are valid. Subclasses can do a better check.
+ Verify credentials are valid.
"""
- return isinstance(credentials, dict)
+ return isinstance(credentials, Credentials) and credentials.is_valid()
@property
def auth_data(self):
- if self.cache is None or self.is_expired(self.cache):
- self.cache = self._get_auth()
- return self.cache
+ return self.get_auth()
@auth_data.deleter
def auth_data(self):
self.clear_auth()
+ def get_auth(self):
+ """
+ Returns auth from the cache if available, otherwise authenticates first
+ """
+ if self.cache is None or self.is_expired(self.cache):
+ self.set_auth()
+ return self.cache
+
+ def set_auth(self):
+ """
+ Forces re-authentication, ignoring any cached auth data, and
+ refills the credentials.
+ """
+ self.cache = self._get_auth()
+ self._fill_credentials(self.cache[1])
+
def clear_auth(self):
"""
Can be called to clear the access cache so that next request
will fetch a new token and base_url.
"""
self.cache = None
+ self.credentials.reset()
def is_expired(self, auth_data):
raise NotImplementedError
@@ -218,16 +251,6 @@
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
- @classmethod
- def check_credentials(cls, credentials, scoped=True):
- # tenant_name is optional if not scoped
- valid = super(KeystoneV2AuthProvider, cls).check_credentials(
- credentials) and 'username' in credentials and \
- 'password' in credentials
- if scoped:
- valid = valid and 'tenant_name' in credentials
- return valid
-
def _auth_client(self):
if self.client_type == 'tempest':
if self.interface == 'json':
@@ -240,13 +263,25 @@
def _auth_params(self):
if self.client_type == 'tempest':
return dict(
- user=self.credentials['username'],
- password=self.credentials['password'],
- tenant=self.credentials.get('tenant_name', None),
+ user=self.credentials.username,
+ password=self.credentials.password,
+ tenant=self.credentials.tenant_name,
auth_data=True)
else:
raise NotImplementedError
+ def _fill_credentials(self, auth_data_body):
+ tenant = auth_data_body['token']['tenant']
+ user = auth_data_body['user']
+ if self.credentials.tenant_name is None:
+ self.credentials.tenant_name = tenant['name']
+ if self.credentials.tenant_id is None:
+ self.credentials.tenant_id = tenant['id']
+ if self.credentials.username is None:
+ self.credentials.username = user['name']
+ if self.credentials.user_id is None:
+ self.credentials.user_id = user['id']
+
def base_url(self, filters, auth_data=None):
"""
Filters can be:
@@ -303,16 +338,6 @@
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
- @classmethod
- def check_credentials(cls, credentials, scoped=True):
- # tenant_name is optional if not scoped
- valid = super(KeystoneV3AuthProvider, cls).check_credentials(
- credentials) and 'username' in credentials and \
- 'password' in credentials and 'domain_name' in credentials
- if scoped:
- valid = valid and 'tenant_name' in credentials
- return valid
-
def _auth_client(self):
if self.client_type == 'tempest':
if self.interface == 'json':
@@ -325,14 +350,47 @@
def _auth_params(self):
if self.client_type == 'tempest':
return dict(
- user=self.credentials['username'],
- password=self.credentials['password'],
- tenant=self.credentials.get('tenant_name', None),
- domain=self.credentials['domain_name'],
+ user=self.credentials.username,
+ password=self.credentials.password,
+ tenant=self.credentials.tenant_name,
+ domain=self.credentials.user_domain_name,
auth_data=True)
else:
raise NotImplementedError
+ def _fill_credentials(self, auth_data_body):
+ # project or domain, depending on the scope
+ project = auth_data_body.get('project', None)
+ domain = auth_data_body.get('domain', None)
+ # user is always there
+ user = auth_data_body['user']
+ # Set project fields
+ if project is not None:
+ if self.credentials.project_name is None:
+ self.credentials.project_name = project['name']
+ if self.credentials.project_id is None:
+ self.credentials.project_id = project['id']
+ if self.credentials.project_domain_id is None:
+ self.credentials.project_domain_id = project['domain']['id']
+ if self.credentials.project_domain_name is None:
+ self.credentials.project_domain_name = \
+ project['domain']['name']
+ # Set domain fields
+ if domain is not None:
+ if self.credentials.domain_id is None:
+ self.credentials.domain_id = domain['id']
+ if self.credentials.domain_name is None:
+ self.credentials.domain_name = domain['name']
+ # Set user fields
+ if self.credentials.username is None:
+ self.credentials.username = user['name']
+ if self.credentials.user_id is None:
+ self.credentials.user_id = user['id']
+ if self.credentials.user_domain_id is None:
+ self.credentials.user_domain_id = user['domain']['id']
+ if self.credentials.user_domain_name is None:
+ self.credentials.user_domain_name = user['domain']['name']
+
def base_url(self, filters, auth_data=None):
"""
Filters can be:
@@ -398,3 +456,248 @@
self.EXPIRY_DATE_FORMAT)
return expiry - self.token_expiry_threshold <= \
datetime.datetime.utcnow()
+
+
+def get_default_credentials(credential_type, fill_in=True):
+ """
+ Returns configured credentials of the specified type
+ based on the configured auth_version
+ """
+ return get_credentials(fill_in=fill_in, credential_type=credential_type)
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+ """
+ Builds a credentials object based on the configured auth_version
+
+ :param credential_type (string): requests credentials from the tempest
+ configuration file. Valid values are defined in
+ Credentials.TYPES.
+ :param kwargs (dict): taken into account only if credential_type is
+ not specified or None. Dict of credential key/value pairs.
+
+ Examples:
+
+ Returns credentials from the provided parameters:
+ >>> get_credentials(username='foo', password='bar')
+
+ Returns credentials from tempest configuration:
+ >>> get_credentials(credential_type='user')
+ """
+ if CONF.identity.auth_version == 'v2':
+ credential_class = KeystoneV2Credentials
+ auth_provider_class = KeystoneV2AuthProvider
+ elif CONF.identity.auth_version == 'v3':
+ credential_class = KeystoneV3Credentials
+ auth_provider_class = KeystoneV3AuthProvider
+ else:
+ raise exceptions.InvalidConfiguration('Unsupported auth version')
+ if credential_type is not None:
+ creds = credential_class.get_default(credential_type)
+ else:
+ creds = credential_class(**kwargs)
+ # Fill in the credentials fields that were not specified
+ if fill_in:
+ auth_provider = auth_provider_class(creds)
+ creds = auth_provider.fill_credentials()
+ return creds
+
+
+class Credentials(object):
+ """
+ Set of credentials for accessing OpenStack services
+
+ ATTRIBUTES: list of valid class attributes representing credentials.
+
+ TYPES: types of credentials available in the configuration file.
+ For each key there's a tuple (section, prefix) to match the
+ configuration options.
+ """
+
+ ATTRIBUTES = []
+ TYPES = {
+ 'identity_admin': ('identity', 'admin'),
+ 'compute_admin': ('compute_admin', None),
+ 'user': ('identity', None),
+ 'alt_user': ('identity', 'alt')
+ }
+
+ def __init__(self, **kwargs):
+ """
+ Enforce the available attributes at init time (only).
+ Additional attributes can still be set afterwards if tests need
+ to do so.
+ """
+ self._initial = kwargs
+ self._apply_credentials(kwargs)
+
+ def _apply_credentials(self, attr):
+ for key in attr.keys():
+ if key in self.ATTRIBUTES:
+ setattr(self, key, attr[key])
+ else:
+ raise exceptions.InvalidCredentials
+
+ def __str__(self):
+ """
+ Represent only attributes included in self.ATTRIBUTES
+ """
+ _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
+ return str(_repr)
+
+ def __eq__(self, other):
+ """
+ Credentials are equal if attributes in self.ATTRIBUTES are equal
+ """
+ return str(self) == str(other)
+
+ def __getattr__(self, key):
+ # If an attribute is set, __getattr__ is not invoked
+ # If an attribute is not set, and it is a known one, return None
+ if key in self.ATTRIBUTES:
+ return None
+ else:
+ raise AttributeError
+
+ def __delitem__(self, key):
+ # For backwards compatibility, support dict behaviour
+ if key in self.ATTRIBUTES:
+ delattr(self, key)
+ else:
+ raise AttributeError
+
+ def get(self, item, default):
+ # In this patch, act as a dict for backwards compatibility
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ return default
+
+ @classmethod
+ def get_default(cls, credentials_type):
+ if credentials_type not in cls.TYPES:
+ raise exceptions.InvalidCredentials()
+ creds = cls._get_default(credentials_type)
+ if not creds.is_valid():
+ raise exceptions.InvalidConfiguration()
+ return creds
+
+ @classmethod
+ def _get_default(cls, credentials_type):
+ raise NotImplementedError
+
+ def is_valid(self):
+ raise NotImplementedError
+
+ def reset(self):
+ # First delete all known attributes
+ for key in self.ATTRIBUTES:
+ if getattr(self, key) is not None:
+ delattr(self, key)
+ # Then re-apply initial setup
+ self._apply_credentials(self._initial)
+
+
+class KeystoneV2Credentials(Credentials):
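+ """
+ Credentials suitable for the Keystone Identity V2 API
+ """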
+
+ CONF_ATTRIBUTES = ['username', 'password', 'tenant_name']
+ ATTRIBUTES = ['user_id', 'tenant_id']
+ ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+ @classmethod
+ def _get_default(cls, credentials_type='user'):
+ params = {}
+ section, prefix = cls.TYPES[credentials_type]
+ for attr in cls.CONF_ATTRIBUTES:
+ _section = getattr(CONF, section)
+ if prefix is None:
+ params[attr] = getattr(_section, attr)
+ else:
+ params[attr] = getattr(_section, prefix + "_" + attr)
+ return cls(**params)
+
+ def is_valid(self):
+ """
+ The minimum set of valid credentials is username and password;
+ tenant is optional.
+ """
+ return None not in (self.username, self.password)
+
+
+class KeystoneV3Credentials(KeystoneV2Credentials):
+ """
+ Credentials suitable for the Keystone Identity V3 API
+ """
+
+ CONF_ATTRIBUTES = ['domain_name', 'password', 'tenant_name', 'username']
+ ATTRIBUTES = ['project_domain_id', 'project_domain_name', 'project_id',
+ 'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
+ 'user_domain_name', 'user_id']
+ ATTRIBUTES.extend(CONF_ATTRIBUTES)
+
+ def __init__(self, **kwargs):
+ """
+ If domain is not specified, load the one configured for the
+ identity manager.
+ """
+ domain_fields = set(x for x in self.ATTRIBUTES if 'domain' in x)
+ if not domain_fields.intersection(kwargs.keys()):
+ kwargs['user_domain_name'] = CONF.identity.admin_domain_name
+ super(KeystoneV3Credentials, self).__init__(**kwargs)
+
+ def __setattr__(self, key, value):
+ parent = super(KeystoneV3Credentials, self)
+ # for tenant_* set both project and tenant
+ if key == 'tenant_id':
+ parent.__setattr__('project_id', value)
+ elif key == 'tenant_name':
+ parent.__setattr__('project_name', value)
+ # for project_* set both project and tenant
+ if key == 'project_id':
+ parent.__setattr__('tenant_id', value)
+ elif key == 'project_name':
+ parent.__setattr__('tenant_name', value)
+ # for *_domain_* set both user and project if not set yet
+ if key == 'user_domain_id':
+ if self.project_domain_id is None:
+ parent.__setattr__('project_domain_id', value)
+ if key == 'project_domain_id':
+ if self.user_domain_id is None:
+ parent.__setattr__('user_domain_id', value)
+ if key == 'user_domain_name':
+ if self.project_domain_name is None:
+ parent.__setattr__('project_domain_name', value)
+ if key == 'project_domain_name':
+ if self.user_domain_name is None:
+ parent.__setattr__('user_domain_name', value)
+ # support domain_name coming from config
+ if key == 'domain_name':
+ parent.__setattr__('user_domain_name', value)
+ parent.__setattr__('project_domain_name', value)
+ # finally trigger default behaviour for all attributes
+ parent.__setattr__(key, value)
+
+ def is_valid(self):
+ """
+ Valid combinations of v3 credentials (excluding token, scope)
+ - User id, password (optional domain)
+ - User name, password and its domain id/name
+ For the scope, valid combinations are:
+ - None
+ - Project id (optional domain)
+ - Project name and its domain id/name
+ """
+ valid_user_domain = any(
+ [self.user_domain_id is not None,
+ self.user_domain_name is not None])
+ valid_project_domain = any(
+ [self.project_domain_id is not None,
+ self.project_domain_name is not None])
+ valid_user = any(
+ [self.user_id is not None,
+ self.username is not None and valid_user_domain])
+ valid_project = any(
+ [self.project_name is None and self.project_id is None,
+ self.project_id is not None,
+ self.project_name is not None and valid_project_domain])
+ return all([self.password is not None, valid_user, valid_project])
diff --git a/tempest/cli/simple_read_only/test_heat.py b/tempest/cli/simple_read_only/test_heat.py
index cf4580c..7a952fc 100644
--- a/tempest/cli/simple_read_only/test_heat.py
+++ b/tempest/cli/simple_read_only/test_heat.py
@@ -85,6 +85,9 @@
def test_heat_help(self):
self.heat('help')
+ def test_heat_bash_completion(self):
+ self.heat('bash-completion')
+
def test_heat_help_cmd(self):
# Check requesting help for a specific command works
help_text = self.heat('help resource-template')
diff --git a/tempest/cli/simple_read_only/test_nova.py b/tempest/cli/simple_read_only/test_nova.py
index a3787ab..1c1ddf1 100644
--- a/tempest/cli/simple_read_only/test_nova.py
+++ b/tempest/cli/simple_read_only/test_nova.py
@@ -145,6 +145,9 @@
def test_admin_secgroup_list_rules(self):
self.nova('secgroup-list-rules')
+ def test_admin_server_group_list(self):
+ self.nova('server-group-list')
+
def test_admin_servce_list(self):
self.nova('service-list')
diff --git a/tempest/clients.py b/tempest/clients.py
index 0ebbd7c..7532bf2 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -16,7 +16,8 @@
import keystoneclient.exceptions
import keystoneclient.v2_0.client
-from tempest.common.rest_client import NegativeRestClient
+from tempest import auth
+from tempest.common import rest_client
from tempest import config
from tempest import exceptions
from tempest import manager
@@ -116,6 +117,8 @@
from tempest.services.data_processing.v1_1.client import DataProcessingClient
from tempest.services.database.json.flavors_client import \
DatabaseFlavorsClientJSON
+from tempest.services.database.json.versions_client import \
+ DatabaseVersionsClientJSON
from tempest.services.identity.json.identity_client import IdentityClientJSON
from tempest.services.identity.json.identity_client import TokenClientJSON
from tempest.services.identity.v3.json.credentials_client import \
@@ -126,6 +129,7 @@
IdentityV3ClientJSON
from tempest.services.identity.v3.json.identity_client import V3TokenClientJSON
from tempest.services.identity.v3.json.policy_client import PolicyClientJSON
+from tempest.services.identity.v3.json.region_client import RegionClientJSON
from tempest.services.identity.v3.json.service_client import \
ServiceClientJSON
from tempest.services.identity.v3.xml.credentials_client import \
@@ -135,6 +139,7 @@
IdentityV3ClientXML
from tempest.services.identity.v3.xml.identity_client import V3TokenClientXML
from tempest.services.identity.v3.xml.policy_client import PolicyClientXML
+from tempest.services.identity.v3.xml.region_client import RegionClientXML
from tempest.services.identity.v3.xml.service_client import \
ServiceClientXML
from tempest.services.identity.xml.identity_client import IdentityClientXML
@@ -161,8 +166,12 @@
VolumeHostsClientJSON
from tempest.services.volume.json.admin.volume_quotas_client import \
VolumeQuotasClientJSON
+from tempest.services.volume.json.admin.volume_services_client import \
+ VolumesServicesClientJSON
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClientJSON
+from tempest.services.volume.json.availability_zone_client import \
+ VolumeAvailabilityZoneClientJSON
from tempest.services.volume.json.backups_client import BackupsClientJSON
from tempest.services.volume.json.extensions_client import \
ExtensionsClientJSON as VolumeExtensionClientJSON
@@ -174,8 +183,12 @@
VolumeHostsClientXML
from tempest.services.volume.xml.admin.volume_quotas_client import \
VolumeQuotasClientXML
+from tempest.services.volume.xml.admin.volume_services_client import \
+ VolumesServicesClientXML
from tempest.services.volume.xml.admin.volume_types_client import \
VolumeTypesClientXML
+from tempest.services.volume.xml.availability_zone_client import \
+ VolumeAvailabilityZoneClientXML
from tempest.services.volume.xml.backups_client import BackupsClientXML
from tempest.services.volume.xml.extensions_client import \
ExtensionsClientXML as VolumeExtensionClientXML
@@ -192,22 +205,12 @@
Top level manager for OpenStack tempest clients
"""
- def __init__(self, username=None, password=None, tenant_name=None,
- interface='json', service=None):
- """
- We allow overriding of the credentials used within the various
- client classes managed by the Manager object. Left as None, the
- standard username/password/tenant_name is used.
-
- :param username: Override of the username
- :param password: Override of the password
- :param tenant_name: Override of the tenant name
- """
+ def __init__(self, credentials=None, interface='json', service=None):
+ # Set interface and client type first
self.interface = interface
self.client_type = 'tempest'
# super cares for credentials validation
- super(Manager, self).__init__(
- username=username, password=password, tenant_name=tenant_name)
+ super(Manager, self).__init__(credentials=credentials)
if self.interface == 'xml':
self.certificates_client = CertificatesClientXML(
@@ -240,11 +243,14 @@
self.availability_zone_client = AvailabilityZoneClientXML(
self.auth_provider)
self.service_client = ServiceClientXML(self.auth_provider)
+ self.volume_services_client = VolumesServicesClientXML(
+ self.auth_provider)
self.aggregates_client = AggregatesClientXML(self.auth_provider)
self.services_client = ServicesClientXML(self.auth_provider)
self.tenant_usages_client = TenantUsagesClientXML(
self.auth_provider)
self.policy_client = PolicyClientXML(self.auth_provider)
+ self.region_client = RegionClientXML(self.auth_provider)
self.hosts_client = HostsClientXML(self.auth_provider)
self.hypervisor_client = HypervisorClientXML(self.auth_provider)
self.network_client = NetworkClientXML(self.auth_provider)
@@ -263,6 +269,8 @@
self.auth_provider)
self.token_client = TokenClientXML()
self.token_v3_client = V3TokenClientXML()
+ self.volume_availability_zone_client = \
+ VolumeAvailabilityZoneClientXML(self.auth_provider)
elif self.interface == 'json':
self.certificates_client = CertificatesClientJSON(
@@ -315,6 +323,8 @@
self.services_v3_client = ServicesV3ClientJSON(
self.auth_provider)
self.service_client = ServiceClientJSON(self.auth_provider)
+ self.volume_services_client = VolumesServicesClientJSON(
+ self.auth_provider)
self.agents_v3_client = AgentsV3ClientJSON(self.auth_provider)
self.aggregates_v3_client = AggregatesV3ClientJSON(
self.auth_provider)
@@ -327,6 +337,7 @@
self.migrations_v3_client = MigrationsV3ClientJSON(
self.auth_provider)
self.policy_client = PolicyClientJSON(self.auth_provider)
+ self.region_client = RegionClientJSON(self.auth_provider)
self.hosts_client = HostsClientJSON(self.auth_provider)
self.hypervisor_v3_client = HypervisorV3ClientJSON(
self.auth_provider)
@@ -346,24 +357,29 @@
self.hosts_v3_client = HostsV3ClientJSON(self.auth_provider)
self.database_flavors_client = DatabaseFlavorsClientJSON(
self.auth_provider)
+ self.database_versions_client = DatabaseVersionsClientJSON(
+ self.auth_provider)
self.queuing_client = QueuingClientJSON(self.auth_provider)
if CONF.service_available.ceilometer:
self.telemetry_client = TelemetryClientJSON(
self.auth_provider)
self.token_client = TokenClientJSON()
self.token_v3_client = V3TokenClientJSON()
- self.negative_client = NegativeRestClient(self.auth_provider)
+ self.negative_client = rest_client.NegativeRestClient(
+ self.auth_provider)
self.negative_client.service = service
+ self.volume_availability_zone_client = \
+ VolumeAvailabilityZoneClientJSON(self.auth_provider)
else:
msg = "Unsupported interface type `%s'" % interface
raise exceptions.InvalidConfiguration(msg)
# TODO(andreaf) EC2 client still do their auth, v2 only
- ec2_client_args = (self.credentials.get('username'),
- self.credentials.get('password'),
+ ec2_client_args = (self.credentials.username,
+ self.credentials.password,
CONF.identity.uri,
- self.credentials.get('tenant_name'))
+ self.credentials.tenant_name)
# common clients
self.account_client = AccountClient(self.auth_provider)
@@ -394,11 +410,10 @@
"""
def __init__(self, interface='json', service=None):
- super(AltManager, self).__init__(CONF.identity.alt_username,
- CONF.identity.alt_password,
- CONF.identity.alt_tenant_name,
- interface=interface,
- service=service)
+ super(AltManager, self).__init__(
+ credentials=auth.get_default_credentials('alt_user'),
+ interface=interface,
+ service=service)
class AdminManager(Manager):
@@ -409,11 +424,10 @@
"""
def __init__(self, interface='json', service=None):
- super(AdminManager, self).__init__(CONF.identity.admin_username,
- CONF.identity.admin_password,
- CONF.identity.admin_tenant_name,
- interface=interface,
- service=service)
+ super(AdminManager, self).__init__(
+ credentials=auth.get_default_credentials('identity_admin'),
+ interface=interface,
+ service=service)
class ComputeAdminManager(Manager):
@@ -425,29 +439,10 @@
def __init__(self, interface='json', service=None):
base = super(ComputeAdminManager, self)
- base.__init__(CONF.compute_admin.username,
- CONF.compute_admin.password,
- CONF.compute_admin.tenant_name,
- interface=interface,
- service=service)
-
-
-class OrchestrationManager(Manager):
- """
- Manager object that uses the admin credentials for its
- so that heat templates can create users
- """
- def __init__(self, interface='json', service=None):
- base = super(OrchestrationManager, self)
- # heat currently needs an admin user so that stacks can create users
- # however the tests need the demo tenant so that the neutron
- # private network is the default. DO NOT change this auth combination
- # until heat can run with the demo user.
- base.__init__(CONF.identity.admin_username,
- CONF.identity.admin_password,
- CONF.identity.tenant_name,
- interface=interface,
- service=service)
+ base.__init__(
+ credentials=auth.get_default_credentials('compute_admin'),
+ interface=interface,
+ service=service)
class OfficialClientManager(manager.Manager):
@@ -462,47 +457,32 @@
IRONICCLIENT_VERSION = '1'
SAHARACLIENT_VERSION = '1.1'
- def __init__(self, username, password, tenant_name):
+ def __init__(self, credentials):
# FIXME(andreaf) Auth provider for client_type 'official' is
# not implemented yet, setting to 'tempest' for now.
self.client_type = 'tempest'
self.interface = None
# super cares for credentials validation
- super(OfficialClientManager, self).__init__(
- username=username, password=password, tenant_name=tenant_name)
+ super(OfficialClientManager, self).__init__(credentials=credentials)
self.baremetal_client = self._get_baremetal_client()
- self.compute_client = self._get_compute_client(username,
- password,
- tenant_name)
- self.identity_client = self._get_identity_client(username,
- password,
- tenant_name)
+ self.compute_client = self._get_compute_client(credentials)
+ self.identity_client = self._get_identity_client(credentials)
self.image_client = self._get_image_client()
self.network_client = self._get_network_client()
- self.volume_client = self._get_volume_client(username,
- password,
- tenant_name)
+ self.volume_client = self._get_volume_client(credentials)
self.object_storage_client = self._get_object_storage_client(
- username,
- password,
- tenant_name)
+ credentials)
self.orchestration_client = self._get_orchestration_client(
- username,
- password,
- tenant_name)
+ credentials)
self.data_processing_client = self._get_data_processing_client(
- username,
- password,
- tenant_name)
+ credentials)
def _get_roles(self):
- keystone_admin = self._get_identity_client(
- CONF.identity.admin_username,
- CONF.identity.admin_password,
- CONF.identity.admin_tenant_name)
+ admin_credentials = auth.get_default_credentials('identity_admin')
+ keystone_admin = self._get_identity_client(admin_credentials)
- username = self.credentials['username']
- tenant_name = self.credentials['tenant_name']
+ username = self.credentials.username
+ tenant_name = self.credentials.tenant_name
user_id = keystone_admin.users.find(name=username).id
tenant_id = keystone_admin.tenants.find(name=tenant_name).id
@@ -511,20 +491,20 @@
return [r.name for r in roles]
- def _get_compute_client(self, username, password, tenant_name):
+ def _get_compute_client(self, credentials):
# Novaclient will not execute operations for anyone but the
# identified user, so a new client needs to be created for
# each user that operations need to be performed for.
if not CONF.service_available.nova:
return None
import novaclient.client
- self._validate_credentials(username, password, tenant_name)
auth_url = CONF.identity.uri
dscv = CONF.identity.disable_ssl_certificate_validation
region = CONF.identity.region
- client_args = (username, password, tenant_name, auth_url)
+ client_args = (credentials.username, credentials.password,
+ credentials.tenant_name, auth_url)
# Create our default Nova client to use in testing
service_type = CONF.compute.catalog_type
@@ -552,7 +532,7 @@
return glanceclient.Client('1', endpoint=endpoint, token=token,
insecure=dscv)
- def _get_volume_client(self, username, password, tenant_name):
+ def _get_volume_client(self, credentials):
if not CONF.service_available.cinder:
return None
import cinderclient.client
@@ -561,25 +541,23 @@
endpoint_type = CONF.volume.endpoint_type
dscv = CONF.identity.disable_ssl_certificate_validation
return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
- username,
- password,
- tenant_name,
+ credentials.username,
+ credentials.password,
+ credentials.tenant_name,
auth_url,
region_name=region,
endpoint_type=endpoint_type,
insecure=dscv,
http_log_debug=True)
- def _get_object_storage_client(self, username, password, tenant_name):
+ def _get_object_storage_client(self, credentials):
if not CONF.service_available.swift:
return None
import swiftclient
auth_url = CONF.identity.uri
# add current tenant to swift operator role group.
- keystone_admin = self._get_identity_client(
- CONF.identity.admin_username,
- CONF.identity.admin_password,
- CONF.identity.admin_tenant_name)
+ admin_credentials = auth.get_default_credentials('identity_admin')
+ keystone_admin = self._get_identity_client(admin_credentials)
# enable test user to operate swift by adding operator role to him.
roles = keystone_admin.roles.list()
@@ -596,26 +574,18 @@
endpoint_type = CONF.object_storage.endpoint_type
os_options = {'endpoint_type': endpoint_type}
- return swiftclient.Connection(auth_url, username, password,
- tenant_name=tenant_name,
+ return swiftclient.Connection(auth_url, credentials.username,
+ credentials.password,
+ tenant_name=credentials.tenant_name,
auth_version='2',
os_options=os_options)
- def _get_orchestration_client(self, username=None, password=None,
- tenant_name=None):
+ def _get_orchestration_client(self, credentials):
if not CONF.service_available.heat:
return None
import heatclient.client
- if not username:
- username = CONF.identity.admin_username
- if not password:
- password = CONF.identity.admin_password
- if not tenant_name:
- tenant_name = CONF.identity.tenant_name
- self._validate_credentials(username, password, tenant_name)
-
- keystone = self._get_identity_client(username, password, tenant_name)
+ keystone = self._get_identity_client(credentials)
region = CONF.identity.region
endpoint_type = CONF.orchestration.endpoint_type
token = keystone.auth_token
@@ -632,22 +602,22 @@
return heatclient.client.Client(self.HEATCLIENT_VERSION,
endpoint,
token=token,
- username=username,
- password=password)
+ username=credentials.username,
+ password=credentials.password)
- def _get_identity_client(self, username, password, tenant_name):
+ def _get_identity_client(self, credentials):
# This identity client is not intended to check the security
# of the identity service, so use admin credentials by default.
- self._validate_credentials(username, password, tenant_name)
auth_url = CONF.identity.uri
dscv = CONF.identity.disable_ssl_certificate_validation
- return keystoneclient.v2_0.client.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- auth_url=auth_url,
- insecure=dscv)
+ return keystoneclient.v2_0.client.Client(
+ username=credentials.username,
+ password=credentials.password,
+ tenant_name=credentials.tenant_name,
+ auth_url=auth_url,
+ insecure=dscv)
def _get_baremetal_client(self):
# ironic client is currently intended to by used by admin users
@@ -664,9 +634,9 @@
service_type = CONF.baremetal.catalog_type
endpoint_type = CONF.baremetal.endpoint_type
creds = {
- 'os_username': self.credentials['username'],
- 'os_password': self.credentials['password'],
- 'os_tenant_name': self.credentials['tenant_name']
+ 'os_username': self.credentials.username,
+ 'os_password': self.credentials.password,
+ 'os_tenant_name': self.credentials.tenant_name
}
try:
@@ -690,41 +660,39 @@
if not CONF.service_available.neutron:
return None
import neutronclient.v2_0.client
- username = CONF.identity.admin_username
- password = CONF.identity.admin_password
- tenant_name = CONF.identity.admin_tenant_name
- self._validate_credentials(username, password, tenant_name)
+ credentials = auth.get_default_credentials('identity_admin')
auth_url = CONF.identity.uri
dscv = CONF.identity.disable_ssl_certificate_validation
endpoint_type = CONF.network.endpoint_type
- return neutronclient.v2_0.client.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- endpoint_type=endpoint_type,
- auth_url=auth_url,
- insecure=dscv)
+ return neutronclient.v2_0.client.Client(
+ username=credentials.username,
+ password=credentials.password,
+ tenant_name=credentials.tenant_name,
+ endpoint_type=endpoint_type,
+ auth_url=auth_url,
+ insecure=dscv)
- def _get_data_processing_client(self, username, password, tenant_name):
+ def _get_data_processing_client(self, credentials):
if not CONF.service_available.sahara:
# Sahara isn't available
return None
import saharaclient.client
- self._validate_credentials(username, password, tenant_name)
-
endpoint_type = CONF.data_processing.endpoint_type
catalog_type = CONF.data_processing.catalog_type
auth_url = CONF.identity.uri
- client = saharaclient.client.Client(self.SAHARACLIENT_VERSION,
- username, password,
- project_name=tenant_name,
- endpoint_type=endpoint_type,
- service_type=catalog_type,
- auth_url=auth_url)
+ client = saharaclient.client.Client(
+ self.SAHARACLIENT_VERSION,
+ credentials.username,
+ credentials.password,
+ project_name=credentials.tenant_name,
+ endpoint_type=endpoint_type,
+ service_type=catalog_type,
+ auth_url=auth_url)
return client
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/cmd/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/cmd/__init__.py
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
new file mode 100755
index 0000000..20ee63e
--- /dev/null
+++ b/tempest/cmd/javelin.py
@@ -0,0 +1,430 @@
+#!/usr/bin/env python
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Javelin makes resources that should survive an upgrade.
+
+Javelin is a tool for creating, verifying, and deleting a small set of
+resources in a declarative way.
+
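+Example invocation (values are illustrative; the command-line options are
+defined in get_options() below):
+
+    javelin.py -m create -r resources.yaml \
+        --os-username admin --os-password secret --os-tenant-name admin
+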
+"""
+
+import logging
+import os
+import sys
+import unittest
+import yaml
+
+import argparse
+
+import tempest.auth
+from tempest import exceptions
+from tempest.services.compute.json import flavors_client
+from tempest.services.compute.json import servers_client
+from tempest.services.identity.json import identity_client
+from tempest.services.image.v2.json import image_client
+from tempest.services.object_storage import container_client
+from tempest.services.object_storage import object_client
+
+OPTS = {}
+USERS = {}
+RES = {}
+
+LOG = None
+
+
+class OSClient(object):
+ _creds = None
+ identity = None
+ servers = None
+
+ def __init__(self, user, pw, tenant):
+ _creds = tempest.auth.KeystoneV2Credentials(
+ username=user,
+ password=pw,
+ tenant_name=tenant)
+ _auth = tempest.auth.KeystoneV2AuthProvider(_creds)
+ self.identity = identity_client.IdentityClientJSON(_auth)
+ self.servers = servers_client.ServersClientJSON(_auth)
+ self.objects = object_client.ObjectClient(_auth)
+ self.containers = container_client.ContainerClient(_auth)
+ self.images = image_client.ImageClientV2JSON(_auth)
+ self.flavors = flavors_client.FlavorsClientJSON(_auth)
+
+
+def load_resources(fname):
+ """Load the expected resources from a yaml flie."""
+ return yaml.load(open(fname, 'r'))
+
+
+def keystone_admin():
+ return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)
+
+
+def client_for_user(name):
+ LOG.debug("Entering client_for_user")
+ if name in USERS:
+ user = USERS[name]
+ LOG.debug("Created client for user %s" % user)
+ return OSClient(user['name'], user['pass'], user['tenant'])
+ else:
+ LOG.error("%s not found in USERS: %s" % (name, USERS))
+
+###################
+#
+# TENANTS
+#
+###################
+
+
+def create_tenants(tenants):
+ """Create tenants from resource definition.
+
+ Don't create the tenants if they already exist.
+ """
+ admin = keystone_admin()
+ _, body = admin.identity.list_tenants()
+ existing = [x['name'] for x in body]
+ for tenant in tenants:
+ if tenant not in existing:
+ admin.identity.create_tenant(tenant)
+ else:
+ LOG.warn("Tenant '%s' already exists in this environment" % tenant)
+
+##############
+#
+# USERS
+#
+##############
+
+
+def _users_for_tenant(users, tenant):
+ u_for_t = []
+ for user in users:
+ for n in user:
+ if user[n]['tenant'] == tenant:
+ u_for_t.append(user[n])
+ return u_for_t
+
+
+def _tenants_from_users(users):
+ tenants = set()
+ for user in users:
+ for n in user:
+ tenants.add(user[n]['tenant'])
+ return tenants
+
+
+def _assign_swift_role(user):
+ admin = keystone_admin()
+ resp, roles = admin.identity.list_roles()
+ role = next(r for r in roles if r['name'] == 'Member')
+ LOG.debug(USERS[user])
+ try:
+ admin.identity.assign_user_role(
+ USERS[user]['tenant_id'],
+ USERS[user]['id'],
+ role['id'])
+ except exceptions.Conflict:
+ # don't care if it's already assigned
+ pass
+
+
+def create_users(users):
+ """Create tenants from resource definition.
+
+ Don't create the tenants if they already exist.
+ """
+ global USERS
+ LOG.info("Creating users")
+ admin = keystone_admin()
+ for u in users:
+ try:
+ tenant = admin.identity.get_tenant_by_name(u['tenant'])
+ except exceptions.NotFound:
+ LOG.error("Tenant: %s - not found" % u['tenant'])
+ continue
+ try:
+ admin.identity.get_user_by_username(tenant['id'], u['name'])
+ LOG.warn("User '%s' already exists in this environment"
+ % u['name'])
+ except exceptions.NotFound:
+ admin.identity.create_user(
+ u['name'], u['pass'], tenant['id'],
+ "%s@%s" % (u['name'], tenant['id']),
+ enabled=True)
+
+
+def collect_users(users):
+ global USERS
+ LOG.info("Creating users")
+ admin = keystone_admin()
+ for u in users:
+ tenant = admin.identity.get_tenant_by_name(u['tenant'])
+ u['tenant_id'] = tenant['id']
+ USERS[u['name']] = u
+ body = admin.identity.get_user_by_username(tenant['id'], u['name'])
+ USERS[u['name']]['id'] = body['id']
+
+
+class JavelinCheck(unittest.TestCase):
+ def __init__(self, users, resources):
+ super(JavelinCheck, self).__init__()
+ self.users = users
+ self.res = resources
+
+ def runTest(self, *args):
+ pass
+
+ def check(self):
+ self.check_users()
+ self.check_objects()
+ self.check_servers()
+
+ def check_users(self):
+ """Check that the users we expect to exist, do.
+
+ We don't use the resource list for this because we need to validate
+ that things like tenantId didn't drift across versions.
+ """
+ for name, user in self.users.iteritems():
+ client = keystone_admin()
+ _, found = client.identity.get_user(user['id'])
+ self.assertEqual(found['name'], user['name'])
+ self.assertEqual(found['tenantId'], user['tenant_id'])
+
+ # also ensure we can auth with that user, and do something
+ # on the cloud. We don't care about the results except that it
+ # remains authorized.
+ client = client_for_user(user['name'])
+ resp, body = client.servers.list_servers()
+ self.assertEqual(resp['status'], '200')
+
+ def check_objects(self):
+ """Check that the objects created are still there."""
+ for obj in self.res['objects']:
+ client = client_for_user(obj['owner'])
+ r, contents = client.objects.get_object(
+ obj['container'], obj['name'])
+ source = _file_contents(obj['file'])
+ self.assertEqual(contents, source)
+
+ def check_servers(self):
+ """Check that the servers are still up and running."""
+ for server in self.res['servers']:
+ client = client_for_user(server['owner'])
+ found = _get_server_by_name(client, server['name'])
+ self.assertIsNotNone(
+ found,
+ "Couldn't find expected server %s" % server['name'])
+
+ r, found = client.servers.get_server(found['id'])
+ # get the ipv4 address
+ addr = found['addresses']['private'][0]['addr']
+ self.assertEqual(os.system("ping -c 1 " + addr), 0,
+ "Server %s is not pingable at %s" % (
+ server['name'], addr))
+
+
+#######################
+#
+# OBJECTS
+#
+#######################
+
+
+def _file_contents(fname):
+ with open(fname, 'r') as f:
+ return f.read()
+
+
+def create_objects(objects):
+ LOG.info("Creating objects")
+ for obj in objects:
+ LOG.debug("Object %s" % obj)
+ _assign_swift_role(obj['owner'])
+ client = client_for_user(obj['owner'])
+ client.containers.create_container(obj['container'])
+ client.objects.create_object(
+ obj['container'], obj['name'],
+ _file_contents(obj['file']))
+
+#######################
+#
+# IMAGES
+#
+#######################
+
+
+def create_images(images):
+ for image in images:
+ client = client_for_user(image['owner'])
+
+ # only upload a new image if the name isn't there
+ r, body = client.images.image_list()
+ names = [x['name'] for x in body]
+ if image['name'] in names:
+ continue
+
+ # special handling for 3 part image
+ extras = {}
+ if image['format'] == 'ami':
+ r, aki = client.images.create_image(
+ 'javelin_' + image['aki'], 'aki', 'aki')
+ client.images.store_image(aki.get('id'), open(image['aki'], 'rb'))
+ extras['kernel_id'] = aki.get('id')
+
+ r, ari = client.images.create_image(
+ 'javelin_' + image['ari'], 'ari', 'ari')
+ client.images.store_image(ari.get('id'), open(image['ari'], 'rb'))
+ extras['ramdisk_id'] = ari.get('id')
+
+ r, body = client.images.create_image(
+ image['name'], image['format'], image['format'], **extras)
+ image_id = body.get('id')
+ client.images.store_image(image_id, open(image['file'], 'rb'))
+
+
+#######################
+#
+# SERVERS
+#
+#######################
+
+def _get_server_by_name(client, name):
+ r, body = client.servers.list_servers()
+ for server in body['servers']:
+ if name == server['name']:
+ return server
+ return None
+
+
+def _get_image_by_name(client, name):
+ r, body = client.images.image_list()
+ for image in body:
+ if name == image['name']:
+ return image
+ return None
+
+
+def _get_flavor_by_name(client, name):
+ r, body = client.flavors.list_flavors()
+ for flavor in body:
+ if name == flavor['name']:
+ return flavor
+ return None
+
+
+def create_servers(servers):
+ for server in servers:
+ client = client_for_user(server['owner'])
+
+ if _get_server_by_name(client, server['name']):
+ continue
+
+ image_id = _get_image_by_name(client, server['image'])['id']
+ flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
+ client.servers.create_server(server['name'], image_id, flavor_id)
+
+
+#######################
+#
+# MAIN LOGIC
+#
+#######################
+
+def create_resources():
+ LOG.info("Creating Resources")
+ # first create keystone level resources, and we need to be admin
+ # for those.
+ create_tenants(RES['tenants'])
+ create_users(RES['users'])
+ collect_users(RES['users'])
+
+ # next create resources in a well known order
+ create_objects(RES['objects'])
+ create_images(RES['images'])
+ create_servers(RES['servers'])
+
+
+def get_options():
+ global OPTS
+ parser = argparse.ArgumentParser(
+ description='Create and validate a fixed set of OpenStack resources')
+ parser.add_argument('-m', '--mode',
+ metavar='<create|check|destroy>',
+ required=True,
+ help=('One of (create, check, destroy)'))
+ parser.add_argument('-r', '--resources',
+ required=True,
+ metavar='resourcefile.yaml',
+ help='Resources definition yaml file')
+ # auth bits, letting us also just source the devstack openrc
+ parser.add_argument('--os-username',
+ metavar='<auth-user-name>',
+ default=os.environ.get('OS_USERNAME'),
+ help=('Defaults to env[OS_USERNAME].'))
+ parser.add_argument('--os-password',
+ metavar='<auth-password>',
+ default=os.environ.get('OS_PASSWORD'),
+ help=('Defaults to env[OS_PASSWORD].'))
+ parser.add_argument('--os-tenant-name',
+ metavar='<auth-tenant-name>',
+ default=os.environ.get('OS_TENANT_NAME'),
+ help=('Defaults to env[OS_TENANT_NAME].'))
+
+ OPTS = parser.parse_args()
+ if OPTS.mode not in ('create', 'check', 'destroy'):
+ print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
+ parser.print_help()
+ sys.exit(1)
+
+
+def setup_logging(debug=True):
+ global LOG
+ LOG = logging.getLogger(__name__)
+ if debug:
+ LOG.setLevel(logging.DEBUG)
+ else:
+ LOG.setLevel(logging.INFO)
+
+ ch = logging.StreamHandler(sys.stdout)
+ ch.setLevel(logging.DEBUG)
+ formatter = logging.Formatter(
+ datefmt='%Y-%m-%d %H:%M:%S',
+ fmt='%(asctime)s.%(msecs).03d - %(levelname)s - %(message)s')
+ ch.setFormatter(formatter)
+ LOG.addHandler(ch)
+
+
+def main():
+ global RES
+ get_options()
+ setup_logging()
+ RES = load_resources(OPTS.resources)
+
+ if OPTS.mode == 'create':
+ create_resources()
+ elif OPTS.mode == 'check':
+ collect_users(RES['users'])
+ checker = JavelinCheck(USERS, RES)
+ checker.check()
+ elif OPTS.mode == 'destroy':
+ LOG.warn("Destroy mode not yet implemented")
+ else:
+ LOG.error('Unknown mode %s' % OPTS.mode)
+ return 1
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
new file mode 100644
index 0000000..f7cb8a9
--- /dev/null
+++ b/tempest/cmd/resources.yaml
@@ -0,0 +1,51 @@
+# This is a yaml description of the most basic definitions of what
+# should exist across the resource boundary. Perhaps one day this will
+# grow into a Heat resource template, but as Heat isn't yet a known
+# working element in upgrades, we do this much simpler thing for now.
+
+tenants:
+ - javelin
+ - discuss
+
+users:
+ - name: javelin
+ pass: gungnir
+ tenant: javelin
+ - name: javelin2
+ pass: gungnir2
+ tenant: discuss
+
+secgroups:
+ - angon:
+ owner: javelin
+ rules:
+ - 'icmp -1 -1 0.0.0.0/0'
+ - 'tcp 22 22 0.0.0.0/0'
+
+# resources that we want to create
+images:
+ - name: javelin_cirros
+ owner: javelin
+ file: cirros-0.3.2-x86_64-blank.img
+ format: ami
+ aki: cirros-0.3.2-x86_64-vmlinuz
+ ari: cirros-0.3.2-x86_64-initrd
+volumes:
+ - assegai:
+ - owner: javelin
+ - gb: 1
+servers:
+ - name: peltast
+ owner: javelin
+ flavor: m1.small
+ image: javelin_cirros
+ - name: hoplite
+ owner: javelin
+ flavor: m1.medium
+ image: javelin_cirros
+objects:
+ - container: jc1
+ name: javelin1
+ owner: javelin
+ file: /etc/hosts
diff --git a/tempest/stress/run_stress.py b/tempest/cmd/run_stress.py
similarity index 98%
rename from tempest/stress/run_stress.py
rename to tempest/cmd/run_stress.py
index c7c17c0..f773996 100755
--- a/tempest/stress/run_stress.py
+++ b/tempest/cmd/run_stress.py
@@ -70,7 +70,29 @@
return tests
-def main(ns):
+parser = argparse.ArgumentParser(description='Run stress tests')
+parser.add_argument('-d', '--duration', default=300, type=int,
+ help="Duration of test in secs")
+parser.add_argument('-s', '--serial', action='store_true',
+ help="Trigger running tests serially")
+parser.add_argument('-S', '--stop', action='store_true',
+ default=False, help="Stop on first error")
+parser.add_argument('-n', '--number', type=int,
+ help="How often an action is executed for each process")
+group = parser.add_mutually_exclusive_group(required=True)
+group.add_argument('-a', '--all', action='store_true',
+ help="Execute all stress tests")
+parser.add_argument('-T', '--type',
+ help="Filters tests of a certain type (e.g. gate)")
+parser.add_argument('-i', '--call-inherited', action='store_true',
+ default=False,
+ help="Call also inherited function with stress attribute")
+group.add_argument('-t', "--tests", nargs='?',
+ help="Name of the file with test description")
+
+
+def main():
+ ns = parser.parse_args()
result = 0
if not ns.all:
tests = json.load(open(ns.tests, 'r'))
@@ -97,29 +119,9 @@
return result
-parser = argparse.ArgumentParser(description='Run stress tests')
-parser.add_argument('-d', '--duration', default=300, type=int,
- help="Duration of test in secs")
-parser.add_argument('-s', '--serial', action='store_true',
- help="Trigger running tests serially")
-parser.add_argument('-S', '--stop', action='store_true',
- default=False, help="Stop on first error")
-parser.add_argument('-n', '--number', type=int,
- help="How often an action is executed for each process")
-group = parser.add_mutually_exclusive_group(required=True)
-group.add_argument('-a', '--all', action='store_true',
- help="Execute all stress tests")
-parser.add_argument('-T', '--type',
- help="Filters tests of a certain type (e.g. gate)")
-parser.add_argument('-i', '--call-inherited', action='store_true',
- default=False,
- help="Call also inherited function with stress attribute")
-group.add_argument('-t', "--tests", nargs='?',
- help="Name of the file with test description")
-
if __name__ == "__main__":
try:
- sys.exit(main(parser.parse_args()))
+ sys.exit(main())
except Exception:
LOG.exception("Failure in the stress test framework")
sys.exit(1)
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
new file mode 100755
index 0000000..7b2e60b
--- /dev/null
+++ b/tempest/cmd/verify_tempest_config.py
@@ -0,0 +1,351 @@
+#!/usr/bin/env python
+
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import json
+import os
+import sys
+import urlparse
+
+import httplib2
+from six.moves import configparser
+
+from tempest import clients
+from tempest import config
+
+
+CONF = config.CONF
+RAW_HTTP = httplib2.Http()
+CONF_FILE = None
+OUTFILE = sys.stdout
+
+
+def _get_config_file():
+ default_config_dir = os.path.join(os.path.abspath(
+ os.path.dirname(os.path.dirname(__file__))), "etc")
+ default_config_file = "tempest.conf"
+
+ conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
+ conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
+ path = os.path.join(conf_dir, conf_file)
+ fd = open(path, 'r+')
+ return fd
+
+
+def change_option(option, group, value):
+ config_parse = configparser.SafeConfigParser()
+ config_parse.optionxform = str
+ config_parse.readfp(CONF_FILE)
+ if not config_parse.has_section(group):
+ config_parse.add_section(group)
+ config_parse.set(group, option, str(value))
+ global OUTFILE
+ config_parse.write(OUTFILE)
+
+
+def print_and_or_update(option, group, value, update):
+ print('Config option %s in group %s should be changed to: %s'
+ % (option, group, value))
+ if update:
+ change_option(option, group, value)
+
+
+def verify_glance_api_versions(os, update):
+ # Check glance api versions
+ __, versions = os.image_client.get_versions()
+ if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
+ versions):
+ print_and_or_update('api_v1', 'image_feature_enabled',
+ not CONF.image_feature_enabled.api_v1, update)
+ if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
+ print_and_or_update('api_v2', 'image_feature_enabled',
+ not CONF.image_feature_enabled.api_v2, update)
+
+
+def _get_unversioned_endpoint(base_url):
+ endpoint_parts = urlparse.urlparse(base_url)
+ endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
+ return endpoint
+
+
+def _get_api_versions(os, service):
+ client_dict = {
+ 'nova': os.servers_client,
+ 'keystone': os.identity_client,
+ 'cinder': os.volumes_client,
+ }
+ client_dict[service].skip_path()
+ endpoint = _get_unversioned_endpoint(client_dict[service].base_url)
+ __, body = RAW_HTTP.request(endpoint, 'GET')
+ client_dict[service].reset_path()
+ body = json.loads(body)
+ if service == 'keystone':
+ versions = map(lambda x: x['id'], body['versions']['values'])
+ else:
+ versions = map(lambda x: x['id'], body['versions'])
+ return versions
+
+
+def verify_keystone_api_versions(os, update):
+ # Check keystone api versions
+ versions = _get_api_versions(os, 'keystone')
+ if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
+ print_and_or_update('api_v2', 'identity_feature_enabled',
+ not CONF.identity_feature_enabled.api_v2, update)
+ if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
+ print_and_or_update('api_v3', 'identity_feature_enabled',
+ not CONF.identity_feature_enabled.api_v3, update)
+
+
+def verify_nova_api_versions(os, update):
+ versions = _get_api_versions(os, 'nova')
+ if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
+ print_and_or_update('api_v3', 'compute_feature_enabled',
+ not CONF.compute_feature_enabled.api_v3, update)
+
+
+def verify_cinder_api_versions(os, update):
+ # Check cinder api versions
+ versions = _get_api_versions(os, 'cinder')
+ if CONF.volume_feature_enabled.api_v1 != ('v1.0' in versions):
+ print_and_or_update('api_v1', 'volume_feature_enabled',
+ not CONF.volume_feature_enabled.api_v1, update)
+ if CONF.volume_feature_enabled.api_v2 != ('v2.0' in versions):
+ print_and_or_update('api_v2', 'volume_feature_enabled',
+ not CONF.volume_feature_enabled.api_v2, update)
+
+
+def get_extension_client(os, service):
+ extensions_client = {
+ 'nova': os.extensions_client,
+ 'nova_v3': os.extensions_v3_client,
+ 'cinder': os.volumes_extension_client,
+ 'neutron': os.network_client,
+ 'swift': os.account_client,
+ }
+ if service not in extensions_client:
+ print('No tempest extensions client for %s' % service)
+ exit(1)
+ return extensions_client[service]
+
+
+def get_enabled_extensions(service):
+ extensions_options = {
+ 'nova': CONF.compute_feature_enabled.api_extensions,
+ 'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
+ 'cinder': CONF.volume_feature_enabled.api_extensions,
+ 'neutron': CONF.network_feature_enabled.api_extensions,
+ 'swift': CONF.object_storage_feature_enabled.discoverable_apis,
+ }
+ if service not in extensions_options:
+ print('No supported extensions list option for %s' % service)
+ exit(1)
+ return extensions_options[service]
+
+
+def verify_extensions(os, service, results):
+ extensions_client = get_extension_client(os, service)
+ __, resp = extensions_client.list_extensions()
+ if isinstance(resp, dict):
+ # Neutron's extension 'name' field is not a single word (it has
+ # spaces in the string). Since that can't be used as a list option,
+ # the api_extension option in the network-feature-enabled group uses
+ # the alias instead of the name.
+ if service == 'neutron':
+ extensions = map(lambda x: x['alias'], resp['extensions'])
+ elif service == 'swift':
+ # Remove Swift general information from extensions list
+ resp.pop('swift')
+ extensions = resp.keys()
+ else:
+ extensions = map(lambda x: x['name'], resp['extensions'])
+
+ else:
+ extensions = map(lambda x: x['name'], resp)
+ if not results.get(service):
+ results[service] = {}
+ extensions_opt = get_enabled_extensions(service)
+ if extensions_opt[0] == 'all':
+ results[service]['extensions'] = extensions
+ return results
+ # Verify that all configured extensions are actually enabled
+ for extension in extensions_opt:
+ results[service][extension] = extension in extensions
+ # Verify that there aren't additional extensions enabled that aren't
+ # specified in the config list
+ for extension in extensions:
+ if extension not in extensions_opt:
+ results[service][extension] = False
+ return results
+
+
+def display_results(results, update, replace):
+ update_dict = {
+ 'swift': 'object-storage-feature-enabled',
+ 'nova': 'compute-feature-enabled',
+ 'nova_v3': 'compute-feature-enabled',
+ 'cinder': 'volume-feature-enabled',
+ 'neutron': 'network-feature-enabled',
+ }
+ for service in results:
+ # If all extensions are specified as being enabled there is no way
+ # to verify this, so we just assume it to be true
+ if results[service].get('extensions'):
+ if replace:
+ output_list = results[service].get('extensions')
+ else:
+ output_list = ['all']
+ else:
+ extension_list = get_enabled_extensions(service)
+ output_list = []
+ for extension in results[service]:
+ if not results[service][extension]:
+ if extension in extension_list:
+ print("%s extension: %s should not be included in the "
+ "list of enabled extensions" % (service,
+ extension))
+ else:
+ print("%s extension: %s should be included in the list"
+ " of enabled extensions" % (service, extension))
+ output_list.append(extension)
+ else:
+ output_list.append(extension)
+ if update:
+ # Sort List
+ output_list.sort()
+ # Convert list to a string
+ output_string = ', '.join(output_list)
+ if service == 'swift':
+ change_option('discoverable_apis', update_dict[service],
+ output_string)
+ elif service == 'nova_v3':
+ change_option('api_v3_extensions', update_dict[service],
+ output_string)
+ else:
+ change_option('api_extensions', update_dict[service],
+ output_string)
+
+
+def check_service_availability(os, update):
+ services = []
+ avail_services = []
+ codename_match = {
+ 'volume': 'cinder',
+ 'network': 'neutron',
+ 'image': 'glance',
+ 'object_storage': 'swift',
+ 'compute': 'nova',
+ 'orchestration': 'heat',
+ 'metering': 'ceilometer',
+ 'telemetry': 'ceilometer',
+ 'data_processing': 'sahara',
+ 'baremetal': 'ironic',
+ 'identity': 'keystone',
+ 'queuing': 'marconi',
+ 'database': 'trove'
+ }
+ # Get catalog list for endpoints to use for validation
+ __, endpoints = os.endpoints_client.list_endpoints()
+ for endpoint in endpoints:
+ __, service = os.service_client.get_service(endpoint['service_id'])
+ services.append(service['type'])
+ # Pull all catalog types from config file and compare against endpoint list
+ for cfgname in dir(CONF._config):
+ cfg = getattr(CONF, cfgname)
+ catalog_type = getattr(cfg, 'catalog_type', None)
+ if not catalog_type:
+ continue
+ else:
+ if cfgname == 'identity':
+ # Keystone is a required service for tempest
+ continue
+ if catalog_type not in services:
+ if getattr(CONF.service_available, codename_match[cfgname]):
+ print('Endpoint type %s not found. Either disable service '
+ '%s or fix the catalog_type in the config file' % (
+ catalog_type, codename_match[cfgname]))
+ if update:
+ change_option(codename_match[cfgname],
+ 'service_available', False)
+ else:
+ if not getattr(CONF.service_available,
+ codename_match[cfgname]):
+ print('Endpoint type %s is available, service %s should be'
+ ' set as available in the config file.' % (
+ catalog_type, codename_match[cfgname]))
+ if update:
+ change_option(codename_match[cfgname],
+ 'service_available', True)
+ else:
+ avail_services.append(codename_match[cfgname])
+ return avail_services
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-u', '--update', action='store_true',
+ help='Update the config file with results from api '
+ 'queries. This assumes whatever is set in the '
+ 'config file is incorrect. For the endpoint '
+ 'checks, where either the catalog type or the '
+ 'service available option could be wrong, the '
+ 'service available option is assumed to be the '
+ 'incorrect one and is thus changed')
+ parser.add_argument('-o', '--output',
+ help="Output file to write an updated config file to. "
+ "This has to be a separate file from the "
+ "original config file. If one isn't specified "
+ "with -u the new config file will be printed to "
+ "STDOUT")
+ parser.add_argument('-r', '--replace-ext', action='store_true',
+ help="If specified the all option will be replaced "
+ "with a full list of extensions")
+ args = parser.parse_args()
+ return args
+
+
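+# Typical invocation (the output file name is illustrative): run the checks
+# and write an updated config to a separate file:
+#
+#     ./verify_tempest_config.py -u -o updated_tempest.conf
+#
+# Without -u the differences found are only reported and nothing is written.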
+def main():
+ print('Running config verification...')
+ opts = parse_args()
+ update = opts.update
+ replace = opts.replace_ext
+ global CONF_FILE
+ global OUTFILE
+ if update:
+ CONF_FILE = _get_config_file()
+ if opts.output:
+ OUTFILE = open(opts.output, 'w+')
+ os = clients.ComputeAdminManager(interface='json')
+ services = check_service_availability(os, update)
+ results = {}
+ for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
+ if service == 'nova_v3' and 'nova' not in services:
+ continue
+ elif service != 'nova_v3' and service not in services:
+ continue
+ results = verify_extensions(os, service, results)
+ verify_keystone_api_versions(os, update)
+ verify_glance_api_versions(os, update)
+ verify_nova_api_versions(os, update)
+ verify_cinder_api_versions(os, update)
+ display_results(results, update, replace)
+ if CONF_FILE:
+ CONF_FILE.close()
+ OUTFILE.close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tempest/common/commands.py b/tempest/common/commands.py
index c31a038..6720847 100644
--- a/tempest/common/commands.py
+++ b/tempest/common/commands.py
@@ -28,15 +28,13 @@
args = shlex.split(cmd)
subprocess_args = {'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT}
- try:
- proc = subprocess.Popen(['/usr/bin/sudo', '-n'] + args,
- **subprocess_args)
- return proc.communicate()[0]
- if proc.returncode != 0:
- LOG.error(cmd + "returned with: " +
- proc.returncode + "exit status")
- except subprocess.CalledProcessError as e:
- LOG.error("command output:\n%s" % e.output)
+ proc = subprocess.Popen(['/usr/bin/sudo', '-n'] + args,
+ **subprocess_args)
+ stdout = proc.communicate()[0]
+ if proc.returncode != 0:
+ LOG.error(("Command {0} returned with exit status {1},"
+ "output {2}").format(cmd, proc.returncode, stdout))
+ return stdout
def ip_addr_raw():
@@ -77,3 +75,22 @@
def ovs_db_dump():
return sudo_cmd_call("ovsdb-client dump")
+
+
+def copy_file_to_host(file_from, dest, host, username, pkey):
+ dest = "%s@%s:%s" % (username, host, dest)
+ cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
+ "-o StrictHostKeyChecking=no " \
+ "-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
+ 'file1': file_from,
+ 'dest': dest}
+ args = shlex.split(cmd)
+ subprocess_args = {'stdout': subprocess.PIPE,
+ 'stderr': subprocess.STDOUT}
+ proc = subprocess.Popen(args, **subprocess_args)
+ stdout, stderr = proc.communicate()
+ if proc.returncode != 0:
+ LOG.error(("Command {0} returned with exit status {1},"
+ "output {2}, error {3}").format(cmd, proc.returncode,
+ stdout, stderr))
+ return stdout
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index c54a8e8..b4618ed 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -14,9 +14,7 @@
import netaddr
-import keystoneclient.v2_0.client as keystoneclient
-import neutronclient.v2_0.client as neutronclient
-
+from tempest import auth
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
@@ -42,24 +40,6 @@
self.identity_admin_client, self.network_admin_client = (
self._get_admin_clients())
- def _get_official_admin_clients(self):
- username = CONF.identity.admin_username
- password = CONF.identity.admin_password
- tenant_name = CONF.identity.admin_tenant_name
- auth_url = CONF.identity.uri
- dscv = CONF.identity.disable_ssl_certificate_validation
- identity_client = keystoneclient.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- auth_url=auth_url,
- insecure=dscv)
- network_client = neutronclient.Client(username=username,
- password=password,
- tenant_name=tenant_name,
- auth_url=auth_url,
- insecure=dscv)
- return identity_client, network_client
-
def _get_admin_clients(self):
"""
Returns a tuple with instances of the following admin clients (in this
@@ -69,11 +49,11 @@
"""
if self.tempest_client:
os = clients.AdminManager(interface=self.interface)
- admin_clients = (os.identity_client,
- os.network_client,)
else:
- admin_clients = self._get_official_admin_clients()
- return admin_clients
+ os = clients.OfficialClientManager(
+ auth.get_default_credentials('identity_admin')
+ )
+ return os.identity_client, os.network_client
def _create_tenant(self, name, description):
if self.tempest_client:
@@ -185,22 +165,19 @@
self._assign_user_role(tenant['id'], user['id'], role['id'])
else:
self._assign_user_role(tenant.id, user.id, role.id)
- return user, tenant
+ return self._get_credentials(user, tenant)
- def _get_cred_names(self, user, tenant):
+ def _get_credentials(self, user, tenant):
if self.tempest_client:
- username = user.get('name')
- tenant_name = tenant.get('name')
+ user_get = user.get
+ tenant_get = tenant.get
else:
- username = user.name
- tenant_name = tenant.name
- return username, tenant_name
-
- def _get_tenant_id(self, tenant):
- if self.tempest_client:
- return tenant.get('id')
- else:
- return tenant.id
+ user_get = user.__dict__.get
+ tenant_get = tenant.__dict__.get
+ return auth.get_credentials(
+ username=user_get('name'), user_id=user_get('id'),
+ tenant_name=tenant_get('name'), tenant_id=tenant_get('id'),
+ password=self.password)
def _create_network_resources(self, tenant_id):
network = None
@@ -314,24 +291,6 @@
body = {'subnet_id': subnet_id}
self.network_admin_client.add_interface_router(router_id, body)
- def get_primary_tenant(self):
- return self.isolated_creds.get('primary')[1]
-
- def get_primary_user(self):
- return self.isolated_creds.get('primary')[0]
-
- def get_alt_tenant(self):
- return self.isolated_creds.get('alt')[1]
-
- def get_alt_user(self):
- return self.isolated_creds.get('alt')[0]
-
- def get_admin_tenant(self):
- return self.isolated_creds.get('admin')[1]
-
- def get_admin_user(self):
- return self.isolated_creds.get('admin')[0]
-
def get_primary_network(self):
return self.isolated_net_resources.get('primary')[0]
@@ -359,62 +318,33 @@
def get_alt_router(self):
return self.isolated_net_resources.get('alt')[2]
- def get_primary_creds(self):
- if self.isolated_creds.get('primary'):
- user, tenant = self.isolated_creds['primary']
- username, tenant_name = self._get_cred_names(user, tenant)
+ def get_credentials(self, credential_type):
+ if self.isolated_creds.get(credential_type):
+ credentials = self.isolated_creds[credential_type]
else:
- user, tenant = self._create_creds()
- username, tenant_name = self._get_cred_names(user, tenant)
- self.isolated_creds['primary'] = (user, tenant)
- LOG.info("Acquired isolated creds:\n user: %s, tenant: %s"
- % (username, tenant_name))
+ is_admin = (credential_type == 'admin')
+ credentials = self._create_creds(admin=is_admin)
+ self.isolated_creds[credential_type] = credentials
+ # Maintained until tests are ported
+ LOG.info("Acquired isolated creds:\n credentials: %s"
+ % credentials)
if CONF.service_available.neutron:
network, subnet, router = self._create_network_resources(
- self._get_tenant_id(tenant))
- self.isolated_net_resources['primary'] = (
+ credentials.tenant_id)
+ self.isolated_net_resources[credential_type] = (
network, subnet, router,)
LOG.info("Created isolated network resources for : \n"
- + " user: %s, tenant: %s" % (username, tenant_name))
- return username, tenant_name, self.password
+ + " credentials: %s" % credentials)
+ return credentials
+
+ def get_primary_creds(self):
+ return self.get_credentials('primary')
def get_admin_creds(self):
- if self.isolated_creds.get('admin'):
- user, tenant = self.isolated_creds['admin']
- username, tenant_name = self._get_cred_names(user, tenant)
- else:
- user, tenant = self._create_creds(admin=True)
- username, tenant_name = self._get_cred_names(user, tenant)
- self.isolated_creds['admin'] = (user, tenant)
- LOG.info("Acquired admin isolated creds:\n user: %s, tenant: %s"
- % (username, tenant_name))
- if CONF.service_available.neutron:
- network, subnet, router = self._create_network_resources(
- self._get_tenant_id(tenant))
- self.isolated_net_resources['admin'] = (
- network, subnet, router,)
- LOG.info("Created isolated network resources for : \n"
- + " user: %s, tenant: %s" % (username, tenant_name))
- return username, tenant_name, self.password
+ return self.get_credentials('admin')
def get_alt_creds(self):
- if self.isolated_creds.get('alt'):
- user, tenant = self.isolated_creds['alt']
- username, tenant_name = self._get_cred_names(user, tenant)
- else:
- user, tenant = self._create_creds()
- username, tenant_name = self._get_cred_names(user, tenant)
- self.isolated_creds['alt'] = (user, tenant)
- LOG.info("Acquired alt isolated creds:\n user: %s, tenant: %s"
- % (username, tenant_name))
- if CONF.service_available.neutron:
- network, subnet, router = self._create_network_resources(
- self._get_tenant_id(tenant))
- self.isolated_net_resources['alt'] = (
- network, subnet, router,)
- LOG.info("Created isolated network resources for : \n"
- + " user: %s, tenant: %s" % (username, tenant_name))
- return username, tenant_name, self.password
+ return self.get_credentials('alt')
def _clear_isolated_router(self, router_id, router_name):
net_client = self.network_admin_client
@@ -423,7 +353,6 @@
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router_name)
- pass
def _clear_isolated_subnet(self, subnet_id, subnet_name):
net_client = self.network_admin_client
@@ -432,7 +361,6 @@
except exceptions.NotFound:
LOG.warn('subnet with name: %s not found for delete' %
subnet_name)
- pass
def _clear_isolated_network(self, network_id, network_name):
net_client = self.network_admin_client
@@ -441,7 +369,6 @@
except exceptions.NotFound:
LOG.warn('network with name: %s not found for delete' %
network_name)
- pass
def _cleanup_ports(self, network_id):
# TODO(mlavalle) This method will be removed once patch
@@ -487,7 +414,6 @@
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router['name'])
- pass
self._clear_isolated_router(router['id'], router['name'])
if (not self.network_resources or
self.network_resources.get('network')):
@@ -505,29 +431,14 @@
if not self.isolated_creds:
return
self._clear_isolated_net_resources()
- for cred in self.isolated_creds:
- user, tenant = self.isolated_creds.get(cred)
+ for creds in self.isolated_creds.itervalues():
try:
- if self.tempest_client:
- self._delete_user(user['id'])
- else:
- self._delete_user(user.id)
+ self._delete_user(creds.user_id)
except exceptions.NotFound:
- if self.tempest_client:
- name = user['name']
- else:
- name = user.name
- LOG.warn("user with name: %s not found for delete" % name)
- pass
+ LOG.warn("user with name: %s not found for delete" %
+ creds.username)
try:
- if self.tempest_client:
- self._delete_tenant(tenant['id'])
- else:
- self._delete_tenant(tenant.id)
+ self._delete_tenant(creds.tenant_id)
except exceptions.NotFound:
- if self.tempest_client:
- name = tenant['name']
- else:
- name = tenant.name
- LOG.warn("tenant with name: %s not found for delete" % name)
- pass
+ LOG.warn("tenant with name: %s not found for delete" %
+ creds.tenant_name)
diff --git a/tempest/common/rest_client.py b/tempest/common/rest_client.py
index 8c07d4f..3c527f5 100644
--- a/tempest/common/rest_client.py
+++ b/tempest/common/rest_client.py
@@ -15,7 +15,6 @@
# under the License.
import collections
-import inspect
import json
from lxml import etree
import re
@@ -24,6 +23,7 @@
import jsonschema
from tempest.common import http
+from tempest.common.utils import misc as misc_utils
from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
@@ -140,15 +140,23 @@
@property
def user(self):
- return self.auth_provider.credentials.get('username', None)
+ return self.auth_provider.credentials.username
+
+ @property
+ def user_id(self):
+ return self.auth_provider.credentials.user_id
@property
def tenant_name(self):
- return self.auth_provider.credentials.get('tenant_name', None)
+ return self.auth_provider.credentials.tenant_name
+
+ @property
+ def tenant_id(self):
+ return self.auth_provider.credentials.tenant_id
@property
def password(self):
- return self.auth_provider.credentials.get('password', None)
+ return self.auth_provider.credentials.password
@property
def base_url(self):
@@ -224,65 +232,22 @@
versions = map(lambda x: x['id'], body)
return resp, versions
- def _find_caller(self):
- """Find the caller class and test name.
-
- Because we know that the interesting things that call us are
- test_* methods, and various kinds of setUp / tearDown, we
- can look through the call stack to find appropriate methods,
- and the class we were in when those were called.
- """
- caller_name = None
- names = []
- frame = inspect.currentframe()
- is_cleanup = False
- # Start climbing the ladder until we hit a good method
- while True:
- try:
- frame = frame.f_back
- name = frame.f_code.co_name
- names.append(name)
- if re.search("^(test_|setUp|tearDown)", name):
- cname = ""
- if 'self' in frame.f_locals:
- cname = frame.f_locals['self'].__class__.__name__
- if 'cls' in frame.f_locals:
- cname = frame.f_locals['cls'].__name__
- caller_name = cname + ":" + name
- break
- elif re.search("^_run_cleanup", name):
- is_cleanup = True
- else:
- cname = ""
- if 'self' in frame.f_locals:
- cname = frame.f_locals['self'].__class__.__name__
- if 'cls' in frame.f_locals:
- cname = frame.f_locals['cls'].__name__
-
- # the fact that we are running cleanups is indicated pretty
- # deep in the stack, so if we see that we want to just
- # start looking for a real class name, and declare victory
- # once we do.
- if is_cleanup and cname:
- if not re.search("^RunTest", cname):
- caller_name = cname + ":_run_cleanups"
- break
- except Exception:
- break
- # prevents frame leaks
- del frame
- if caller_name is None:
- self.LOG.debug("Sane call name not found in %s" % names)
- return caller_name
-
def _get_request_id(self, resp):
for i in ('x-openstack-request-id', 'x-compute-request-id'):
if i in resp:
return resp[i]
return ""
+ def _log_request_start(self, method, req_url, req_headers={},
+ req_body=None):
+ caller_name = misc_utils.find_test_caller()
+ trace_regex = CONF.debug.trace_requests
+ if trace_regex and re.search(trace_regex, caller_name):
+ self.LOG.debug('Starting Request (%s): %s %s' %
+ (caller_name, method, req_url))
+
def _log_request(self, method, req_url, resp,
- secs="", req_headers=None,
+ secs="", req_headers={},
req_body=None, resp_body=None):
# if we have the request id, put it in the right part of the log
extra = dict(request_id=self._get_request_id(resp))
@@ -290,7 +255,7 @@
# we're going to just provide work around on who is actually
# providing timings by gracefully adding no content if they don't.
# Once we're down to 1 caller, clean this up.
- caller_name = self._find_caller()
+ caller_name = misc_utils.find_test_caller()
if secs:
secs = " %.3fs" % secs
self.LOG.info(
@@ -306,6 +271,8 @@
# world this is important to match
trace_regex = CONF.debug.trace_requests
if trace_regex and re.search(trace_regex, caller_name):
+ if 'X-Auth-Token' in req_headers:
+ req_headers['X-Auth-Token'] = '<omitted>'
log_fmt = """Request (%s): %s %s %s%s
Request - Headers: %s
Body: %s
@@ -369,7 +336,7 @@
# Parse one-item-like xmls (user, role, etc)
return common.xml_to_json(element)
- def response_checker(self, method, url, headers, body, resp, resp_body):
+ def response_checker(self, method, resp, resp_body):
if (resp.status in set((204, 205, 304)) or resp.status < 200 or
method.upper() == 'HEAD') and resp_body:
raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
@@ -405,6 +372,7 @@
# Do the actual request, and time it
start = time.time()
+ self._log_request_start(method, req_url)
resp, resp_body = self.http_obj.request(
req_url, method, headers=req_headers, body=req_body)
end = time.time()
@@ -413,8 +381,7 @@
resp_body=resp_body)
# Verify HTTP response codes
- self.response_checker(method, url, req_headers, req_body, resp,
- resp_body)
+ self.response_checker(method, resp, resp_body)
return resp, resp_body
@@ -593,10 +560,12 @@
msg = ("The status code(%s) is different than the expected "
"one(%s)") % (resp.status, response_code)
raise exceptions.InvalidHttpSuccessCode(msg)
- response_schema = schema.get('response_body')
- if response_schema:
+
+ # Check the body of a response
+ body_schema = schema.get('response_body')
+ if body_schema:
try:
- jsonschema.validate(body, response_schema)
+ jsonschema.validate(body, body_schema)
except jsonschema.ValidationError as ex:
msg = ("HTTP response body is invalid (%s)") % ex
raise exceptions.InvalidHTTPResponseBody(msg)
@@ -605,6 +574,15 @@
msg = ("HTTP response body should not exist (%s)") % body
raise exceptions.InvalidHTTPResponseBody(msg)
+ # Check the header of a response
+ header_schema = schema.get('response_header')
+ if header_schema:
+ try:
+ jsonschema.validate(resp, header_schema)
+ except jsonschema.ValidationError as ex:
+ msg = ("HTTP response header is invalid (%s)") % ex
+ raise exceptions.InvalidHTTPResponseHeader(msg)
+
class NegativeRestClient(RestClient):
"""
diff --git a/tempest/common/utils/misc.py b/tempest/common/utils/misc.py
index a0b0c0a..0d78273 100644
--- a/tempest/common/utils/misc.py
+++ b/tempest/common/utils/misc.py
@@ -13,6 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import inspect
+import re
+
+from tempest.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
def singleton(cls):
"""Simple wrapper for classes that should only have a single instance."""
@@ -23,3 +30,58 @@
instances[cls] = cls()
return instances[cls]
return getinstance
+
+
+def find_test_caller():
+ """Find the caller class and test name.
+
+ Because we know that the interesting things that call us are
+ test_* methods, and various kinds of setUp / tearDown, we
+ can look through the call stack to find appropriate methods,
+ and the class we were in when those were called.
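+
+ The result is typically built as "<ClassName>:<method name>", e.g. a
+ value such as "ServersTest:test_list_servers" (the class name here is
+ purely illustrative); None is returned when no suitable caller is
+ found.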
+ """
+ caller_name = None
+ names = []
+ frame = inspect.currentframe()
+ is_cleanup = False
+ # Start climbing the ladder until we hit a good method
+ while True:
+ try:
+ frame = frame.f_back
+ name = frame.f_code.co_name
+ names.append(name)
+ if re.search("^(test_|setUp|tearDown)", name):
+ cname = ""
+ if 'self' in frame.f_locals:
+ cname = frame.f_locals['self'].__class__.__name__
+ if 'cls' in frame.f_locals:
+ cname = frame.f_locals['cls'].__name__
+ caller_name = cname + ":" + name
+ break
+ elif re.search("^_run_cleanup", name):
+ is_cleanup = True
+ elif name == 'main':
+ caller_name = 'main'
+ break
+ else:
+ cname = ""
+ if 'self' in frame.f_locals:
+ cname = frame.f_locals['self'].__class__.__name__
+ if 'cls' in frame.f_locals:
+ cname = frame.f_locals['cls'].__name__
+
+ # the fact that we are running cleanups is indicated pretty
+ # deep in the stack, so if we see that we want to just
+ # start looking for a real class name, and declare victory
+ # once we do.
+ if is_cleanup and cname:
+ if not re.search("^RunTest", cname):
+ caller_name = cname + ":_run_cleanups"
+ break
+ except Exception:
+ break
+ # prevents frame leaks
+ del frame
+ if caller_name is None:
+ LOG.debug("Sane call name not found in %s" % names)
+ return caller_name
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 8e6b9fb..d8474a0 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -13,6 +13,7 @@
import time
+from tempest.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
@@ -21,6 +22,16 @@
LOG = logging.getLogger(__name__)
+def _console_dump(client, server_id):
+ try:
+ resp, output = client.get_console_output(server_id, None)
+ LOG.debug("Console Output for Server %s:\n%s" % (
+ server_id, output))
+ except exceptions.NotFound:
+ LOG.debug("Server %s: doesn't have a console" % server_id)
+ pass
+
+
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
extra_timeout=0, raise_on_error=True):
@@ -70,7 +81,9 @@
'/'.join((old_status, str(old_task_state))),
'/'.join((server_status, str(task_state))),
time.time() - start_time)
+
if (server_status == 'ERROR') and raise_on_error:
+ _console_dump(client, server_id)
raise exceptions.BuildErrorException(server_id=server_id)
timed_out = int(time.time()) - start_time >= timeout
@@ -86,6 +99,11 @@
'timeout': timeout})
message += ' Current status: %s.' % server_status
message += ' Current task state: %s.' % task_state
+
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ _console_dump(client, server_id)
raise exceptions.TimeoutException(message)
old_status = server_status
old_task_state = task_state
@@ -119,4 +137,7 @@
'status': status,
'timeout': client.build_timeout})
message += ' Current status: %s.' % image['status']
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
diff --git a/tempest/config.py b/tempest/config.py
index 7084768..94a725b 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -72,6 +72,10 @@
default=None,
help="API key to use when authenticating.",
secret=True),
+ cfg.StrOpt('domain_name',
+ default=None,
+ help="Domain name for authentication (Keystone V3)."
+ "The same domain applies to user and project"),
cfg.StrOpt('alt_username',
default=None,
help="Username of alternate user to use for Nova API "
@@ -84,6 +88,10 @@
default=None,
help="API key to use when authenticating as alternate user.",
secret=True),
+ cfg.StrOpt('alt_domain_name',
+ default=None,
+ help="Alternate domain name for authentication (Keystone V3)."
+ "The same domain applies to user and project"),
cfg.StrOpt('admin_username',
default=None,
help="Administrative Username to use for "
@@ -96,6 +104,10 @@
default=None,
help="API key to use when authenticating as admin.",
secret=True),
+ cfg.StrOpt('admin_domain_name',
+ default=None,
+ help="Admin domain name for authentication (Keystone V3)."
+ "The same domain applies to user and project"),
]
identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
@@ -125,11 +137,12 @@
"better parallel execution, but also requires that "
"OpenStack Identity API admin credentials are known."),
cfg.StrOpt('image_ref',
- default="{$IMAGE_ID}",
- help="Valid primary image reference to be used in tests."),
+ help="Valid primary image reference to be used in tests. "
+ "This is a required option"),
cfg.StrOpt('image_ref_alt',
- default="{$IMAGE_ID_ALT}",
- help="Valid secondary image reference to be used in tests."),
+ help="Valid secondary image reference to be used in tests. "
+ "This is a required option, but if only one image is "
+ "available duplicate the value of image_ref above"),
cfg.StrOpt('flavor_ref',
default="1",
help="Valid primary flavor to use in tests."),
@@ -151,7 +164,7 @@
help="Password used to authenticate to an instance using "
"the alternate image."),
cfg.IntOpt('build_interval',
- default=10,
+ default=1,
help="Time in seconds between build status checks."),
cfg.IntOpt('build_timeout',
default=300,
@@ -283,7 +296,15 @@
cfg.BoolOpt('vnc_console',
default=False,
help='Enable VNC console. This configuration value should '
- 'be same as [nova.vnc]->vnc_enabled in nova.conf')
+ 'be same as [nova.vnc]->vnc_enabled in nova.conf'),
+ cfg.BoolOpt('spice_console',
+ default=False,
+ help='Enable Spice console. This configuration value should '
+ 'be same as [nova.spice]->enabled in nova.conf'),
+ cfg.BoolOpt('rdp_console',
+ default=False,
+ help='Enable RDP console. This configuration value should '
+ 'be same as [nova.rdp]->enabled in nova.conf')
]
@@ -302,6 +323,10 @@
default=None,
help="API key to use when authenticating as admin.",
secret=True),
+ cfg.StrOpt('domain_name',
+ default=None,
+ help="Domain name for authentication as admin (Keystone V3)."
+ "The same domain applies to user and project"),
]
image_group = cfg.OptGroup(name='image',
@@ -387,7 +412,7 @@
help="Timeout in seconds to wait for network operation to "
"complete."),
cfg.IntOpt('build_interval',
- default=10,
+ default=1,
help="Time in seconds between network operation status "
"checks."),
]
@@ -412,6 +437,10 @@
cfg.StrOpt('catalog_type',
default='queuing',
help='Catalog type of the Queuing service.'),
+ cfg.IntOpt('max_queues_per_page',
+ default=20,
+ help='The maximum number of queue records per page when '
+ 'listing queues'),
]
volume_group = cfg.OptGroup(name='volume',
@@ -419,7 +448,7 @@
VolumeGroup = [
cfg.IntOpt('build_interval',
- default=10,
+ default=1,
help='Time in seconds between volume availability checks.'),
cfg.IntOpt('build_timeout',
default=300,
@@ -542,6 +571,9 @@
cfg.StrOpt('db_flavor_ref',
default="1",
help="Valid primary flavor to use in database tests."),
+ cfg.StrOpt('db_current_version',
+ default="v1.0",
+ help="Current database version to use in database tests."),
]
orchestration_group = cfg.OptGroup(name='orchestration',
@@ -606,6 +638,10 @@
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the telemetry service."),
+ cfg.BoolOpt('too_slow_to_test',
+ default=True,
+ help="This variable is used as flag to enable "
+ "notification tests")
]
@@ -996,6 +1032,13 @@
self.compute_admin.username = self.identity.admin_username
self.compute_admin.password = self.identity.admin_password
self.compute_admin.tenant_name = self.identity.admin_tenant_name
+ cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
+ group='identity')
+ cfg.CONF.set_default('alt_domain_name',
+ self.identity.admin_domain_name,
+ group='identity')
+ cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
+ group='compute-admin')
def __init__(self, parse_conf=True):
"""Initialize a configuration from a conf directory and conf file."""
@@ -1031,8 +1074,21 @@
class TempestConfigProxy(object):
_config = None
+ _extra_log_defaults = [
+ 'keystoneclient.session=INFO',
+ 'paramiko.transport=INFO',
+ 'requests.packages.urllib3.connectionpool=WARN'
+ ]
+
+ def _fix_log_levels(self):
+ """Tweak the oslo log defaults."""
+ for opt in logging.log_opts:
+ if opt.dest == 'default_log_levels':
+ opt.default.extend(self._extra_log_defaults)
+
def __getattr__(self, attr):
if not self._config:
+ self._fix_log_levels()
self._config = TempestConfigPrivate()
return getattr(self._config, attr)
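For context on how these options are consumed: tempest registers each list of StrOpt/BoolOpt definitions under its option group and reads the values back through CONF, which is standard oslo.config usage. A minimal standalone sketch under that assumption (the import path targets a current oslo.config release; the option mirrors the identity domain_name added above):

    from oslo_config import cfg

    CONF = cfg.CONF

    identity_group = cfg.OptGroup(name='identity', title='Identity options')
    identity_opts = [
        cfg.StrOpt('domain_name',
                   default=None,
                   help='Domain name for authentication (Keystone V3). '
                        'The same domain applies to user and project'),
    ]

    CONF.register_group(identity_group)
    CONF.register_opts(identity_opts, group=identity_group)

    print(CONF.identity.domain_name)  # None until set in tempest.conf
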
diff --git a/tempest/exceptions.py b/tempest/exceptions.py
index 857e1e8..4eb1cea 100644
--- a/tempest/exceptions.py
+++ b/tempest/exceptions.py
@@ -118,7 +118,7 @@
class StackResourceBuildErrorException(TempestException):
- message = ("Resource %(resource_name) in stack %(stack_identifier)s is "
+ message = ("Resource %(resource_name)s in stack %(stack_identifier)s is "
"in %(resource_status)s status due to "
"'%(resource_status_reason)s'")
@@ -197,6 +197,10 @@
message = "HTTP response body is invalid json or xml"
+class InvalidHTTPResponseHeader(RestClientException):
+ message = "HTTP response header is invalid"
+
+
class InvalidContentType(RestClientException):
message = "Invalid content type provided"
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index 270851d..183d422 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -12,8 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import re
+import pep8
+
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
@@ -22,7 +25,7 @@
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
-SCENARIO_DECORATOR = re.compile(r'\s*@.*services\(')
+SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
@@ -55,6 +58,10 @@
def no_setupclass_for_unit_tests(physical_line, filename):
+
+ if pep8.noqa(physical_line):
+ return
+
if 'tempest/tests' in filename:
if SETUPCLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
@@ -75,8 +82,32 @@
return 0, "T106: Don't put vi configuration in source files"
+def service_tags_not_in_module_path(physical_line, filename):
+ """Check that a service tag isn't in the module path
+
+ A service tag should only be added if the service name isn't already in
+ the module path.
+
+ T107
+ """
+ # NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
+    # created for services like heat which would cause false positives for
+ # those tests, so just exclude the scenario tests.
+ if 'tempest/scenario' not in filename:
+ matches = SCENARIO_DECORATOR.match(physical_line)
+ if matches:
+ services = matches.group(1).split(',')
+ for service in services:
+ service_name = service.strip().strip("'")
+ modulepath = os.path.split(filename)[0]
+ if service_name in modulepath:
+ return (physical_line.find(service_name),
+ "T107: service tag should not be in path")
+
+
def factory(register):
register(import_no_clients_in_api)
register(scenario_tests_need_service_tags)
register(no_setupclass_for_unit_tests)
register(no_vi_headers)
+ register(service_tags_not_in_module_path)
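A standalone illustration of the T107 rule added above: a services() decorator is flagged when one of its tags already appears in the module path. The sample filename and the simplified return value are illustrative only:

    import os
    import re

    SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')


    def redundant_service_tag(physical_line, filename):
        # Mirror the T107 logic: outside tempest/scenario, report any service
        # name from the decorator that already appears in the module path.
        if 'tempest/scenario' in filename:
            return None
        match = SCENARIO_DECORATOR.match(physical_line)
        if not match:
            return None
        modulepath = os.path.split(filename)[0]
        for service in match.group(1).split(','):
            name = service.strip().strip("'")
            if name in modulepath:
                return name
        return None


    print(redundant_service_tag("    @test.services('compute')",
                                "tempest/api/compute/servers/test_foo.py"))
    # -> 'compute'
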
diff --git a/tempest/manager.py b/tempest/manager.py
index 63235db..fb2842f 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -29,7 +29,7 @@
and a client object for a test case to use in performing actions.
"""
- def __init__(self, username=None, password=None, tenant_name=None):
+ def __init__(self, credentials=None):
"""
We allow overriding of the credentials used within the various
client classes managed by the Manager object. Left as None, the
@@ -38,29 +38,18 @@
:param credentials: Override of the credentials
"""
self.auth_version = CONF.identity.auth_version
- # FIXME(andreaf) Change Manager __init__ to accept a credentials dict
- if username is None or password is None:
- # Tenant None is a valid use case
- self.credentials = self.get_default_credentials()
+ if credentials is None:
+ self.credentials = auth.get_default_credentials('user')
else:
- self.credentials = dict(username=username, password=password,
- tenant_name=tenant_name)
- if self.auth_version == 'v3':
- self.credentials['domain_name'] = 'Default'
+ self.credentials = credentials
+ # Check if passed or default credentials are valid
+ if not self.credentials.is_valid():
+ raise exceptions.InvalidCredentials()
# Creates an auth provider for the credentials
self.auth_provider = self.get_auth_provider(self.credentials)
# FIXME(andreaf) unused
self.client_attr_names = []
- # we do this everywhere, have it be part of the super class
- def _validate_credentials(self, username, password, tenant_name):
- if None in (username, password, tenant_name):
- msg = ("Missing required credentials. "
- "username: %(u)s, password: %(p)s, "
- "tenant_name: %(t)s" %
- {'u': username, 'p': password, 't': tenant_name})
- raise exceptions.InvalidConfiguration(msg)
-
@classmethod
def get_auth_provider_class(cls, auth_version):
if auth_version == 'v2':
@@ -68,13 +57,6 @@
else:
return auth.KeystoneV3AuthProvider
- def get_default_credentials(self):
- return dict(
- username=CONF.identity.username,
- password=CONF.identity.password,
- tenant_name=CONF.identity.tenant_name
- )
-
def get_auth_provider(self, credentials):
if credentials is None:
raise exceptions.InvalidCredentials(
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 1e7ddb1..7703d4d 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,21 +16,27 @@
import logging
import os
+import re
import six
import subprocess
+import time
+from heatclient import exc as heat_exceptions
import netaddr
from neutronclient.common import exceptions as exc
from novaclient import exceptions as nova_exceptions
from tempest.api.network import common as net_common
+from tempest import auth
from tempest import clients
+from tempest.common import debug
from tempest.common import isolated_creds
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log
+from tempest.openstack.common import timeutils
import tempest.test
CONF = config.CONF
@@ -65,10 +71,8 @@
cls.__name__, tempest_client=False,
network_resources=cls.network_resources)
- username, password, tenant_name = cls.credentials()
-
cls.manager = clients.OfficialClientManager(
- username, password, tenant_name)
+ credentials=cls.credentials())
cls.compute_client = cls.manager.compute_client
cls.image_client = cls.manager.image_client
cls.baremetal_client = cls.manager.baremetal_client
@@ -82,27 +86,27 @@
cls.os_resources = []
@classmethod
- def _get_credentials(cls, get_creds, prefix):
+ def _get_credentials(cls, get_creds, ctype):
if CONF.compute.allow_tenant_isolation:
- username, tenant_name, password = get_creds()
+ creds = get_creds()
else:
- username = getattr(CONF.identity, prefix + 'username')
- password = getattr(CONF.identity, prefix + 'password')
- tenant_name = getattr(CONF.identity, prefix + 'tenant_name')
- return username, password, tenant_name
+ creds = auth.get_default_credentials(ctype)
+ return creds
@classmethod
def credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_primary_creds, '')
+ return cls._get_credentials(cls.isolated_creds.get_primary_creds,
+ 'user')
@classmethod
def alt_credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_alt_creds, 'alt_')
+ return cls._get_credentials(cls.isolated_creds.get_alt_creds,
+ 'alt_user')
@classmethod
def admin_credentials(cls):
return cls._get_credentials(cls.isolated_creds.get_admin_creds,
- 'admin_')
+ 'identity_admin')
@staticmethod
def cleanup_resource(resource, test_name):
@@ -115,8 +119,10 @@
resource.delete()
except Exception as e:
# If the resource is already missing, mission accomplished.
- # add status code as workaround for bug 1247568
- if (e.__class__.__name__ == 'NotFound' or
+ # - Status code tolerated as a workaround for bug 1247568
+ # - HTTPNotFound tolerated as this is currently raised when
+ # attempting to delete an already-deleted heat stack.
+ if (e.__class__.__name__ in ('NotFound', 'HTTPNotFound') or
(hasattr(e, 'status_code') and e.status_code == 404)):
return
raise
@@ -280,7 +286,6 @@
for ruleset in rulesets:
sg_rule = client.security_group_rules.create(secgroup_id,
**ruleset)
- self.set_resource(sg_rule.id, sg_rule)
rules.append(sg_rule)
return rules
@@ -406,7 +411,7 @@
'name': name,
'container_format': fmt,
'disk_format': fmt,
- 'is_public': 'True',
+ 'is_public': 'False',
}
params.update(properties)
image = self.image_client.images.create(**params)
@@ -442,6 +447,30 @@
LOG.debug("image:%s" % self.image)
+# power/provision states as of icehouse
+class BaremetalPowerStates(object):
+ """Possible power states of an Ironic node."""
+ POWER_ON = 'power on'
+ POWER_OFF = 'power off'
+ REBOOT = 'rebooting'
+ SUSPEND = 'suspended'
+
+
+class BaremetalProvisionStates(object):
+ """Possible provision states of an Ironic node."""
+ NOSTATE = None
+ INIT = 'initializing'
+ ACTIVE = 'active'
+ BUILDING = 'building'
+ DEPLOYWAIT = 'wait call-back'
+ DEPLOYING = 'deploying'
+ DEPLOYFAIL = 'deploy failed'
+ DEPLOYDONE = 'deploy complete'
+ DELETING = 'deleting'
+ DELETED = 'deleted'
+ ERROR = 'error'
+
+
class BaremetalScenarioTest(OfficialClientTest):
@classmethod
def setUpClass(cls):
@@ -453,8 +482,8 @@
raise cls.skipException(msg)
# use an admin client manager for baremetal client
- username, password, tenant = cls.admin_credentials()
- manager = clients.OfficialClientManager(username, password, tenant)
+ admin_creds = cls.admin_credentials()
+ manager = clients.OfficialClientManager(credentials=admin_creds)
cls.baremetal_client = manager.baremetal_client
# allow any issues obtaining the node list to raise early
@@ -517,6 +546,55 @@
ports.append(self.baremetal_client.port.get(port.uuid))
return ports
+ def add_keypair(self):
+ self.keypair = self.create_keypair()
+
+ def verify_connectivity(self, ip=None):
+ if ip:
+ dest = self.get_remote_client(ip)
+ else:
+ dest = self.get_remote_client(self.instance)
+ dest.validate_authentication()
+
+ def boot_instance(self):
+ create_kwargs = {
+ 'key_name': self.keypair.id
+ }
+ self.instance = self.create_server(
+ wait=False, create_kwargs=create_kwargs)
+
+ self.set_resource('instance', self.instance)
+
+ self.wait_node(self.instance.id)
+ self.node = self.get_node(instance_id=self.instance.id)
+
+ self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_ON)
+
+ self.wait_provisioning_state(
+ self.node.uuid,
+ [BaremetalProvisionStates.DEPLOYWAIT,
+ BaremetalProvisionStates.ACTIVE],
+ timeout=15)
+
+ self.wait_provisioning_state(self.node.uuid,
+ BaremetalProvisionStates.ACTIVE,
+ timeout=CONF.baremetal.active_timeout)
+
+ self.status_timeout(
+ self.compute_client.servers, self.instance.id, 'ACTIVE')
+
+ self.node = self.get_node(instance_id=self.instance.id)
+ self.instance = self.compute_client.servers.get(self.instance.id)
+
+ def terminate_instance(self):
+ self.instance.delete()
+ self.remove_resource('instance')
+ self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_OFF)
+ self.wait_provisioning_state(
+ self.node.uuid,
+ BaremetalProvisionStates.NOSTATE,
+ timeout=CONF.baremetal.unprovision_timeout)
+
class NetworkScenarioTest(OfficialClientTest):
"""
@@ -541,13 +619,7 @@
@classmethod
def setUpClass(cls):
super(NetworkScenarioTest, cls).setUpClass()
- if CONF.compute.allow_tenant_isolation:
- cls.tenant_id = cls.isolated_creds.get_primary_tenant().id
- else:
- cls.tenant_id = cls.manager._get_identity_client(
- CONF.identity.username,
- CONF.identity.password,
- CONF.identity.tenant_name).tenant_id
+ cls.tenant_id = cls.manager.identity_client.tenant_id
def _create_network(self, tenant_id, namestart='network-smoke-'):
name = data_utils.rand_name(namestart)
@@ -783,6 +855,51 @@
private_key)
linux_client.validate_authentication()
+ def _check_public_network_connectivity(self, ip_address, username,
+ private_key, should_connect=True,
+ msg=None, servers=None):
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ LOG.debug('checking network connections to IP %s with user: %s' %
+ (ip_address, username))
+ try:
+ self._check_vm_connectivity(ip_address,
+ username,
+ private_key,
+ should_connect=should_connect)
+ except Exception:
+ ex_msg = 'Public network connectivity check failed'
+ if msg:
+ ex_msg += ": " + msg
+ LOG.exception(ex_msg)
+ self._log_console_output(servers)
+ debug.log_net_debug()
+ raise
+
+ def _check_tenant_network_connectivity(self, server,
+ username,
+ private_key,
+ should_connect=True,
+ servers_for_debug=None):
+ if not CONF.network.tenant_networks_reachable:
+ msg = 'Tenant networks not configured to be reachable.'
+ LOG.info(msg)
+ return
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ try:
+ for net_name, ip_addresses in server.networks.iteritems():
+ for ip_address in ip_addresses:
+ self._check_vm_connectivity(ip_address,
+ username,
+ private_key,
+ should_connect=should_connect)
+ except Exception:
+ LOG.exception('Tenant network connectivity check failed')
+ self._log_console_output(servers_for_debug)
+ debug.log_net_debug()
+ raise
+
def _check_remote_connectivity(self, source, dest, should_succeed=True):
"""
check ping server via source ssh connection
@@ -924,7 +1041,6 @@
client=client,
**sg_rule['security_group_rule']
)
- self.set_resource(sg_rule.id, sg_rule)
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
self.assertEqual(secgroup.id, sg_rule.security_group_id)
@@ -1053,10 +1169,10 @@
@classmethod
def credentials(cls):
- username = CONF.identity.admin_username
- password = CONF.identity.admin_password
- tenant_name = CONF.identity.tenant_name
- return username, password, tenant_name
+ admin_creds = auth.get_default_credentials('identity_admin')
+ creds = auth.get_default_credentials('user')
+ admin_creds.tenant_name = creds.tenant_name
+ return admin_creds
def _load_template(self, base_file, file_name):
filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
@@ -1074,3 +1190,98 @@
for net in networks['networks']:
if net['name'] == CONF.compute.fixed_network_name:
return net
+
+ @staticmethod
+ def _stack_output(stack, output_key):
+ """Return a stack output value for a given key."""
+ return next((o['output_value'] for o in stack.outputs
+ if o['output_key'] == output_key), None)
+
+ def _ping_ip_address(self, ip_address, should_succeed=True):
+ cmd = ['ping', '-c1', '-w1', ip_address]
+
+ def ping():
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.wait()
+ return (proc.returncode == 0) == should_succeed
+
+ return tempest.test.call_until_true(
+ ping, CONF.orchestration.build_timeout, 1)
+
+ def _wait_for_resource_status(self, stack_identifier, resource_name,
+ status, failure_pattern='^.*_FAILED$'):
+ """Waits for a Resource to reach a given status."""
+ fail_regexp = re.compile(failure_pattern)
+ build_timeout = CONF.orchestration.build_timeout
+ build_interval = CONF.orchestration.build_interval
+
+ start = timeutils.utcnow()
+ while timeutils.delta_seconds(start,
+ timeutils.utcnow()) < build_timeout:
+ try:
+ res = self.client.resources.get(
+ stack_identifier, resource_name)
+ except heat_exceptions.HTTPNotFound:
+ # ignore this, as the resource may not have
+ # been created yet
+ pass
+ else:
+ if res.resource_status == status:
+ return
+ if fail_regexp.search(res.resource_status):
+ raise exceptions.StackResourceBuildErrorException(
+ resource_name=res.resource_name,
+ stack_identifier=stack_identifier,
+ resource_status=res.resource_status,
+ resource_status_reason=res.resource_status_reason)
+ time.sleep(build_interval)
+
+ message = ('Resource %s failed to reach %s status within '
+ 'the required time (%s s).' %
+ (res.resource_name, status, build_timeout))
+ raise exceptions.TimeoutException(message)
+
+ def _wait_for_stack_status(self, stack_identifier, status,
+ failure_pattern='^.*_FAILED$'):
+ """
+ Waits for a Stack to reach a given status.
+
+        Note this compares the full $action_$status, e.g.
+ CREATE_COMPLETE, not just COMPLETE which is exposed
+ via the status property of Stack in heatclient
+ """
+ fail_regexp = re.compile(failure_pattern)
+ build_timeout = CONF.orchestration.build_timeout
+ build_interval = CONF.orchestration.build_interval
+
+ start = timeutils.utcnow()
+ while timeutils.delta_seconds(start,
+ timeutils.utcnow()) < build_timeout:
+ try:
+ stack = self.client.stacks.get(stack_identifier)
+ except heat_exceptions.HTTPNotFound:
+                # ignore this, as the stack may not have
+ # been created yet
+ pass
+ else:
+ if stack.stack_status == status:
+ return
+ if fail_regexp.search(stack.stack_status):
+ raise exceptions.StackBuildErrorException(
+ stack_identifier=stack_identifier,
+ stack_status=stack.stack_status,
+ stack_status_reason=stack.stack_status_reason)
+ time.sleep(build_interval)
+
+ message = ('Stack %s failed to reach %s status within '
+ 'the required time (%s s).' %
+ (stack.stack_name, status, build_timeout))
+ raise exceptions.TimeoutException(message)
+
+ def _stack_delete(self, stack_identifier):
+ try:
+ self.client.stacks.delete(stack_identifier)
+ except heat_exceptions.HTTPNotFound:
+ pass
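Both waiters above share the same poll-until-status shape: fetch the current status, succeed on the target, fail fast on a *_FAILED match, and give up after build_timeout while sleeping build_interval between polls. A generic standalone sketch of that shape (fetch_status is a placeholder callable, not part of tempest):

    import re
    import time


    def wait_for_status(fetch_status, target, timeout=300, interval=1,
                        failure_pattern=r'^.*_FAILED$'):
        fail_re = re.compile(failure_pattern)
        deadline = time.time() + timeout
        status = None
        while time.time() < deadline:
            status = fetch_status()
            if status == target:
                return status
            if status and fail_re.search(status):
                raise RuntimeError('reached failure status %s' % status)
            time.sleep(interval)
        raise RuntimeError('timed out waiting for %s (last seen: %s)'
                           % (target, status))
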
diff --git a/tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml b/tempest/scenario/orchestration/cfn_init_signal.yaml
similarity index 97%
rename from tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml
rename to tempest/scenario/orchestration/cfn_init_signal.yaml
index fa5345e..c95aabf 100644
--- a/tempest/api/orchestration/stacks/templates/cfn_init_signal.yaml
+++ b/tempest/scenario/orchestration/cfn_init_signal.yaml
@@ -62,7 +62,7 @@
#!/bin/bash -v
/opt/aws/bin/cfn-init
/opt/aws/bin/cfn-signal -e 0 --data "`cat /tmp/smoke-status`" \
- "WaitHandle"
+ --id smoke_status "WaitHandle"
WaitHandle:
Type: AWS::CloudFormation::WaitConditionHandle
WaitCondition:
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
new file mode 100644
index 0000000..36e6126
--- /dev/null
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -0,0 +1,130 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from tempest import config
+from tempest import exceptions
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class CfnInitScenarioTest(manager.OrchestrationScenarioTest):
+
+ def setUp(self):
+ super(CfnInitScenarioTest, self).setUp()
+ if not CONF.orchestration.image_ref:
+ raise self.skipException("No image available to test")
+ self.client = self.orchestration_client
+ self.template_name = 'cfn_init_signal.yaml'
+
+ def assign_keypair(self):
+ self.stack_name = self._stack_rand_name()
+ if CONF.orchestration.keypair_name:
+ self.keypair = None
+ self.keypair_name = CONF.orchestration.keypair_name
+ else:
+ self.keypair = self.create_keypair()
+ self.keypair_name = self.keypair.id
+
+ def launch_stack(self):
+ net = self._get_default_network()
+ self.parameters = {
+ 'key_name': self.keypair_name,
+ 'flavor': CONF.orchestration.instance_type,
+ 'image': CONF.orchestration.image_ref,
+ 'timeout': CONF.orchestration.build_timeout,
+ 'network': net['id'],
+ }
+
+ # create the stack
+ self.template = self._load_template(__file__, self.template_name)
+ self.client.stacks.create(
+ stack_name=self.stack_name,
+ template=self.template,
+ parameters=self.parameters)
+
+ self.stack = self.client.stacks.get(self.stack_name)
+ self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
+ self.addCleanup(self._stack_delete, self.stack_identifier)
+
+ def check_stack(self):
+ sid = self.stack_identifier
+ self._wait_for_resource_status(
+ sid, 'WaitHandle', 'CREATE_COMPLETE')
+ self._wait_for_resource_status(
+ sid, 'SmokeSecurityGroup', 'CREATE_COMPLETE')
+ self._wait_for_resource_status(
+ sid, 'SmokeKeys', 'CREATE_COMPLETE')
+ self._wait_for_resource_status(
+ sid, 'CfnUser', 'CREATE_COMPLETE')
+ self._wait_for_resource_status(
+ sid, 'SmokeServer', 'CREATE_COMPLETE')
+
+ server_resource = self.client.resources.get(sid, 'SmokeServer')
+ server_id = server_resource.physical_resource_id
+ server = self.compute_client.servers.get(server_id)
+ server_ip = server.networks[CONF.compute.network_for_ssh][0]
+
+ if not self._ping_ip_address(server_ip):
+ self._log_console_output(servers=[server])
+ self.fail(
+ "Timed out waiting for %s to become reachable" % server_ip)
+
+ try:
+ self._wait_for_resource_status(
+ sid, 'WaitCondition', 'CREATE_COMPLETE')
+ except (exceptions.StackResourceBuildErrorException,
+ exceptions.TimeoutException) as e:
+ raise e
+ finally:
+            # attempt to log the server console regardless of whether the
+            # WaitCondition completes. This allows successful and failed
+            # cloud-init logs to be compared
+ self._log_console_output(servers=[server])
+
+ self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
+
+ stack = self.client.stacks.get(sid)
+
+ # This is an assert of great significance, as it means the following
+ # has happened:
+ # - cfn-init read the provided metadata and wrote out a file
+ # - a user was created and credentials written to the server
+ # - a cfn-signal was built which was signed with provided credentials
+ # - the wait condition was fulfilled and the stack has changed state
+ wait_status = json.loads(
+ self._stack_output(stack, 'WaitConditionStatus'))
+ self.assertEqual('smoke test complete', wait_status['smoke_status'])
+
+ if self.keypair:
+ # Check that the user can authenticate with the generated
+ # keypair
+ try:
+ linux_client = self.get_remote_client(
+ server_ip, username='ec2-user')
+ linux_client.validate_authentication()
+ except (exceptions.ServerUnreachable,
+ exceptions.SSHTimeout) as e:
+ self._log_console_output(servers=[server])
+ raise e
+
+ @test.attr(type='slow')
+ @test.services('orchestration', 'compute')
+ def test_server_cfn_init(self):
+ self.assign_keypair()
+ self.launch_stack()
+ self.check_stack()
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 8e34c16..6817c48 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -54,8 +54,8 @@
def _get_host_name(self):
hosts = self.compute_client.hosts.list()
self.assertTrue(len(hosts) >= 1)
- hostname = hosts[0].host_name
- return hostname
+ computes = [x for x in hosts if x.service == 'compute']
+ return computes[0].host_name
def _add_host(self, aggregate_name, host):
aggregate = self.compute_client.aggregates.add_host(aggregate_name,
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
index c53aa83..82c6b5d 100644
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -23,31 +23,7 @@
LOG = logging.getLogger(__name__)
-# power/provision states as of icehouse
-class PowerStates(object):
- """Possible power states of an Ironic node."""
- POWER_ON = 'power on'
- POWER_OFF = 'power off'
- REBOOT = 'rebooting'
- SUSPEND = 'suspended'
-
-
-class ProvisionStates(object):
- """Possible provision states of an Ironic node."""
- NOSTATE = None
- INIT = 'initializing'
- ACTIVE = 'active'
- BUILDING = 'building'
- DEPLOYWAIT = 'wait call-back'
- DEPLOYING = 'deploying'
- DEPLOYFAIL = 'deploy failed'
- DEPLOYDONE = 'deploy complete'
- DELETING = 'deleting'
- DELETED = 'deleted'
- ERROR = 'error'
-
-
-class BaremetalBasicOptsPXESSH(manager.BaremetalScenarioTest):
+class BaremetalBasicOpsPXESSH(manager.BaremetalScenarioTest):
"""
This smoke test tests the pxe_ssh Ironic driver. It follows this basic
set of operations:
@@ -65,21 +41,11 @@
* Monitors the associated Ironic node for power and
expected state transitions
"""
- def add_keypair(self):
- self.keypair = self.create_keypair()
-
def add_floating_ip(self):
floating_ip = self.compute_client.floating_ips.create()
self.instance.add_floating_ip(floating_ip)
return floating_ip.ip
- def verify_connectivity(self, ip=None):
- if ip:
- dest = self.get_remote_client(ip)
- else:
- dest = self.get_remote_client(self.instance)
- dest.validate_authentication()
-
def validate_driver_info(self):
f_id = self.instance.flavor['id']
flavor_extra = self.compute_client.flavors.get(f_id).get_keys()
@@ -98,43 +64,6 @@
self.assertEqual(n_port['device_id'], self.instance.id)
self.assertEqual(n_port['mac_address'], port.address)
- def boot_instance(self):
- create_kwargs = {
- 'key_name': self.keypair.id
- }
- self.instance = self.create_server(
- wait=False, create_kwargs=create_kwargs)
-
- self.set_resource('instance', self.instance)
-
- self.wait_node(self.instance.id)
- self.node = self.get_node(instance_id=self.instance.id)
-
- self.wait_power_state(self.node.uuid, PowerStates.POWER_ON)
-
- self.wait_provisioning_state(
- self.node.uuid,
- [ProvisionStates.DEPLOYWAIT, ProvisionStates.ACTIVE],
- timeout=15)
-
- self.wait_provisioning_state(self.node.uuid, ProvisionStates.ACTIVE,
- timeout=CONF.baremetal.active_timeout)
-
- self.status_timeout(
- self.compute_client.servers, self.instance.id, 'ACTIVE')
-
- self.node = self.get_node(instance_id=self.instance.id)
- self.instance = self.compute_client.servers.get(self.instance.id)
-
- def terminate_instance(self):
- self.instance.delete()
- self.remove_resource('instance')
- self.wait_power_state(self.node.uuid, PowerStates.POWER_OFF)
- self.wait_provisioning_state(
- self.node.uuid,
- ProvisionStates.NOSTATE,
- timeout=CONF.baremetal.unprovision_timeout)
-
@test.services('baremetal', 'compute', 'image', 'network')
def test_baremetal_server_ops(self):
self.add_keypair()
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index b7a30f8..ed5743c 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -31,7 +31,7 @@
Test large operations.
This test below:
- * Spin up multiple instances in one nova call
+ * Spin up multiple instances in one nova call, and repeat three times
* as a regular user
* TODO: same thing for cinder
@@ -63,9 +63,20 @@
self.set_resource(server.name, server)
self._wait_for_server_status('ACTIVE')
- @test.services('compute', 'image')
- def test_large_ops_scenario(self):
+ def _large_ops_scenario(self):
if CONF.scenario.large_ops_number < 1:
return
self.glance_image_create()
self.nova_boot()
+
+ @test.services('compute', 'image')
+ def test_large_ops_scenario_1(self):
+ self._large_ops_scenario()
+
+ @test.services('compute', 'image')
+ def test_large_ops_scenario_2(self):
+ self._large_ops_scenario()
+
+ @test.services('compute', 'image')
+ def test_large_ops_scenario_3(self):
+ self._large_ops_scenario()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 5f71461..1c24b5c 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -13,10 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+
+import httplib
+import tempfile
import time
-import urllib
+import urllib2
from tempest.api.network import common as net_common
+from tempest.common import commands
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
@@ -68,6 +72,7 @@
def setUp(self):
super(TestLoadBalancerBasic, self).setUp()
self.server_ips = {}
+ self.server_fixed_ips = {}
self._create_security_group()
def cleanup_wrapper(self, resource):
@@ -118,7 +123,8 @@
self.floating_ips[floating_ip] = server
self.server_ips[server.id] = floating_ip.floating_ip_address
else:
- self.server_ips[server.id] = server.networks[net.name][0]
+ self.server_ips[server.id] = server.networks[net['name']][0]
+ self.server_fixed_ips[server.id] = server.networks[net['name']][0]
self.assertTrue(self.servers_keypairs)
return server
@@ -133,71 +139,53 @@
1. SSH to the instance
2. Start two http backends listening on ports 80 and 88 respectively
- In case there are two instances, each backend is created on a separate
- instance.
-
- The backends are the inetd services. To start them we need to edit
- /etc/inetd.conf in the following way:
- www stream tcp nowait root /bin/sh sh /home/cirros/script_name
-
- Where /home/cirros/script_name is a path to a script which
- echoes the responses:
- echo -e 'HTTP/1.0 200 OK\r\n\r\nserver_name
-
- If we want the server to listen on port 88, then we use
- "kerberos" instead of "www".
"""
for server_id, ip in self.server_ips.iteritems():
private_key = self.servers_keypairs[server_id].private_key
server_name = self.compute_client.servers.get(server_id).name
+ username = config.scenario.ssh_user
ssh_client = self.get_remote_client(
server_or_ip=ip,
private_key=private_key)
ssh_client.validate_authentication()
- # Create service for inetd
- create_script = """sudo sh -c "echo -e \\"echo -e 'HTTP/1.0 """ \
- """200 OK\\\\\\r\\\\\\n\\\\\\r\\\\\\n""" \
- """%(server)s'\\" >>/home/cirros/%(script)s\""""
- cmd = create_script % {
- 'server': server_name,
- 'script': 'script1'}
+            # Write a backend's response into a file
+ resp = """HTTP/1.0 200 OK\r\nContent-Length: 8\r\n\r\n%s"""
+ with tempfile.NamedTemporaryFile() as script:
+ script.write(resp % server_name)
+ script.flush()
+ with tempfile.NamedTemporaryFile() as key:
+ key.write(private_key)
+ key.flush()
+ commands.copy_file_to_host(script.name,
+ "~/script1",
+ ip,
+ username, key.name)
+ # Start netcat
+ start_server = """sudo nc -ll -p %(port)s -e cat """ \
+ """~/%(script)s &"""
+ cmd = start_server % {'port': self.port1,
+ 'script': 'script1'}
ssh_client.exec_command(cmd)
- # Configure inetd
- configure_inetd = """sudo sh -c "echo -e \\"%(service)s """ \
- """stream tcp nowait root /bin/sh sh """ \
- """/home/cirros/%(script)s\\" >> """ \
- """/etc/inetd.conf\""""
- # "www" stands for port 80
- cmd = configure_inetd % {'service': 'www',
- 'script': 'script1'}
- ssh_client.exec_command(cmd)
-
if len(self.server_ips) == 1:
- cmd = create_script % {'server': 'server2',
- 'script': 'script2'}
+ with tempfile.NamedTemporaryFile() as script:
+ script.write(resp % 'server2')
+ script.flush()
+ with tempfile.NamedTemporaryFile() as key:
+ key.write(private_key)
+ key.flush()
+ commands.copy_file_to_host(script.name,
+ "~/script2", ip,
+ username, key.name)
+ cmd = start_server % {'port': self.port2,
+ 'script': 'script2'}
ssh_client.exec_command(cmd)
- # "kerberos" stands for port 88
- cmd = configure_inetd % {'service': 'kerberos',
- 'script': 'script2'}
- ssh_client.exec_command(cmd)
-
- # Get PIDs of inetd
- pids = ssh_client.get_pids('inetd')
- if pids != ['']:
- # If there are any inetd processes, reload them
- kill_cmd = "sudo kill -HUP %s" % ' '.join(pids)
- ssh_client.exec_command(kill_cmd)
- else:
- # In other case start inetd
- start_inetd = "sudo /usr/sbin/inetd /etc/inetd.conf"
- ssh_client.exec_command(start_inetd)
def _check_connection(self, check_ip, port=80):
def try_connect(ip, port):
try:
- resp = urllib.urlopen("http://{0}:{1}/".format(ip, port))
+ resp = urllib2.urlopen("http://{0}:{1}/".format(ip, port))
if resp.getcode() == 200:
return True
return False
@@ -231,8 +219,8 @@
but with different ports to listen on.
"""
- for server_id, ip in self.server_ips.iteritems():
- if len(self.server_ips) == 1:
+ for server_id, ip in self.server_fixed_ips.iteritems():
+ if len(self.server_fixed_ips) == 1:
member1 = self._create_member(address=ip,
protocol_port=self.port1,
pool_id=self.pool.id)
@@ -282,25 +270,31 @@
def _check_load_balancing(self):
"""
- 1. Send 100 requests on the floating ip associated with the VIP
+ 1. Send 10 requests on the floating ip associated with the VIP
2. Check that the requests are shared between
the two servers and that both of them get equal portions
of the requests
"""
self._check_connection(self.vip_ip)
- resp = self._send_requests(self.vip_ip)
- self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
- self.assertEqual(50, resp.count("server1\n"))
- self.assertEqual(50, resp.count("server2\n"))
+ self._send_requests(self.vip_ip, set(["server1", "server2"]))
- def _send_requests(self, vip_ip):
- resp = []
- for count in range(100):
- resp.append(
- urllib.urlopen(
- "http://{0}/".format(vip_ip)).read())
- return resp
+ def _send_requests(self, vip_ip, expected, num_req=10):
+ count = 0
+ while count < num_req:
+ try:
+ resp = []
+ for i in range(len(self.members)):
+ resp.append(
+ urllib2.urlopen(
+ "http://{0}/".format(vip_ip)).read())
+ count += 1
+ self.assertEqual(expected,
+ set(resp))
+            # NOTE: There is always a slim chance of getting this exception
+            # due to quirks of haproxy's internal behavior.
+ except httplib.BadStatusLine:
+ pass
@test.attr(type='smoke')
@test.services('compute', 'network')
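A Python 3 sketch of the retried request loop introduced above (the patch itself targets Python 2's urllib2/httplib): each round issues one request per expected backend and only counts once every backend has answered, and an occasional BadStatusLine from haproxy simply retries the round. Names here are illustrative:

    import http.client
    import urllib.request


    def check_round_robin(vip_ip, expected, num_req=10):
        # expected is the set of backend responses, e.g. {"server1", "server2"}
        done = 0
        while done < num_req:
            try:
                seen = set()
                for _ in range(len(expected)):
                    url = "http://%s/" % vip_ip
                    with urllib.request.urlopen(url) as resp:
                        seen.add(resp.read().decode().strip())
                assert seen == expected, seen
                done += 1
            except http.client.BadStatusLine:
                # haproxy can occasionally produce a malformed status line;
                # just retry the whole round.
                pass
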
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 0ba65cf..f1cd320 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -15,12 +15,11 @@
import testtools
-from tempest.common import debug
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
-from tempest.test import services
+from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
@@ -85,62 +84,23 @@
public_network_id)
self.addCleanup(self.cleanup_wrapper, self.floating_ip)
- def _check_tenant_network_connectivity(self, server,
- username,
- private_key,
- should_connect=True):
- if not CONF.network.tenant_networks_reachable:
- msg = 'Tenant networks not configured to be reachable.'
- LOG.info(msg)
- return
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- try:
- for net_name, ip_addresses in server.networks.iteritems():
- for ip_address in ip_addresses:
- self._check_vm_connectivity(ip_address,
- username,
- private_key,
- should_connect=should_connect)
- except Exception:
- LOG.exception('Tenant network connectivity check failed')
- self._log_console_output(servers=[server])
- debug.log_ip_ns()
- raise
-
- def _check_public_network_connectivity(self, floating_ip,
- username,
- private_key,
- should_connect=True):
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- try:
- self._check_vm_connectivity(floating_ip, username, private_key,
- should_connect=should_connect)
- except Exception:
- LOG.exception("Public network connectivity check failed")
- debug.log_ip_ns()
- raise
-
def _check_network_connectivity(self, should_connect=True):
username = CONF.compute.image_ssh_user
private_key = self.keypair.private_key
- self._check_tenant_network_connectivity(self.server,
- username,
- private_key,
- should_connect=should_connect)
+ self._check_tenant_network_connectivity(
+ self.server, username, private_key, should_connect=should_connect,
+ servers_for_debug=[self.server])
floating_ip = self.floating_ip.floating_ip_address
- self._check_public_network_connectivity(floating_ip,
- username,
- private_key,
- should_connect=should_connect)
+ self._check_public_network_connectivity(floating_ip, username,
+ private_key, should_connect,
+ servers=[self.server])
def _wait_server_status_and_check_network_connectivity(self):
self.status_timeout(self.compute_client.servers, self.server.id,
'ACTIVE')
self._check_network_connectivity()
- @services('compute', 'network')
+ @test.services('compute', 'network')
def test_server_connectivity_stop_start(self):
self.server.stop()
self.status_timeout(self.compute_client.servers, self.server.id,
@@ -149,12 +109,12 @@
self.server.start()
self._wait_server_status_and_check_network_connectivity()
- @services('compute', 'network')
+ @test.services('compute', 'network')
def test_server_connectivity_reboot(self):
self.server.reboot()
self._wait_server_status_and_check_network_connectivity()
- @services('compute', 'network')
+ @test.services('compute', 'network')
def test_server_connectivity_rebuild(self):
image_ref_alt = CONF.compute.image_ref_alt
self.server.rebuild(image_ref_alt)
@@ -162,7 +122,7 @@
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
- @services('compute', 'network')
+ @test.services('compute', 'network')
def test_server_connectivity_pause_unpause(self):
self.server.pause()
self.status_timeout(self.compute_client.servers, self.server.id,
@@ -173,7 +133,7 @@
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
- @services('compute', 'network')
+ @test.services('compute', 'network')
def test_server_connectivity_suspend_resume(self):
self.server.suspend()
self.status_timeout(self.compute_client.servers, self.server.id,
@@ -184,7 +144,7 @@
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize is not available.')
- @services('compute', 'network')
+ @test.services('compute', 'network')
def test_server_connectivity_resize(self):
resize_flavor = CONF.compute.flavor_ref_alt
if resize_flavor == CONF.compute.flavor_ref:
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index d5ab3d3..21782ee 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -156,24 +156,13 @@
return dict(server=server, keypair=keypair)
def _check_tenant_network_connectivity(self):
- if not CONF.network.tenant_networks_reachable:
- msg = 'Tenant networks not configured to be reachable.'
- LOG.info(msg)
- return
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
ssh_login = CONF.compute.image_ssh_user
- try:
- for server, key in self.servers.iteritems():
- for net_name, ip_addresses in server.networks.iteritems():
- for ip_address in ip_addresses:
- self._check_vm_connectivity(ip_address, ssh_login,
- key.private_key)
- except Exception:
- LOG.exception('Tenant connectivity check failed')
- self._log_console_output(servers=self.servers.keys())
- debug.log_net_debug()
- raise
+ for server, key in self.servers.iteritems():
+ # call the common method in the parent class
+ super(TestNetworkBasicOps, self).\
+ _check_tenant_network_connectivity(
+ server, ssh_login, key.private_key,
+ servers_for_debug=self.servers.keys())
def _create_and_associate_floating_ips(self):
public_network_id = CONF.network.public_network_id
@@ -184,28 +173,16 @@
def _check_public_network_connectivity(self, should_connect=True,
msg=None):
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
ssh_login = CONF.compute.image_ssh_user
- LOG.debug('checking network connections')
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip.floating_ip_address
private_key = None
if should_connect:
private_key = self.servers[server].private_key
- try:
- self._check_vm_connectivity(ip_address,
- ssh_login,
- private_key,
- should_connect=should_connect)
- except Exception:
- ex_msg = 'Public network connectivity check failed'
- if msg:
- ex_msg += ": " + msg
- LOG.exception(ex_msg)
- self._log_console_output(servers=self.servers.keys())
- debug.log_net_debug()
- raise
+ # call the common method in the parent class
+ super(TestNetworkBasicOps, self)._check_public_network_connectivity(
+ ip_address, ssh_login, private_key, should_connect, msg,
+ self.servers.keys())
def _disassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index b1b06cc..4616b82 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -98,17 +98,10 @@
access point
"""
- def __init__(self, tenant_id, tenant_user, tenant_pass, tenant_name):
- self.manager = clients.OfficialClientManager(
- tenant_user,
- tenant_pass,
- tenant_name
- )
- self.keypair = None
- self.tenant_id = tenant_id
- self.tenant_name = tenant_name
- self.tenant_user = tenant_user
- self.tenant_pass = tenant_pass
+ def __init__(self, credentials):
+ self.manager = clients.OfficialClientManager(credentials)
+ # Credentials from manager are filled with both names and IDs
+ self.creds = self.manager.credentials
self.network = None
self.subnet = None
self.router = None
@@ -121,12 +114,14 @@
self.router = router
def _get_tenant_credentials(self):
- return self.tenant_user, self.tenant_pass, self.tenant_name
+ # FIXME(andreaf) Unused method
+ return self.creds
@classmethod
def check_preconditions(cls):
super(TestSecurityGroupsBasicOps, cls).check_preconditions()
- if (cls.alt_tenant_id is None) or (cls.tenant_id is cls.alt_tenant_id):
+ if (cls.alt_creds is None) or \
+ (cls.tenant_id is cls.alt_creds.tenant_id):
msg = 'No alt_tenant defined'
cls.enabled = False
raise cls.skipException(msg)
@@ -140,21 +135,20 @@
@classmethod
def setUpClass(cls):
super(TestSecurityGroupsBasicOps, cls).setUpClass()
- alt_creds = cls.alt_credentials()
- cls.alt_tenant_id = cls.manager._get_identity_client(
- *alt_creds
- ).tenant_id
+ cls.alt_creds = cls.alt_credentials()
+ cls.alt_manager = clients.OfficialClientManager(cls.alt_creds)
+ # Credentials from the manager are filled with both IDs and Names
+ cls.alt_creds = cls.alt_manager.credentials
cls.check_preconditions()
# TODO(mnewby) Consider looking up entities as needed instead
# of storing them as collections on the class.
cls.floating_ips = {}
cls.tenants = {}
- cls.primary_tenant = cls.TenantProperties(cls.tenant_id,
- *cls.credentials())
- cls.alt_tenant = cls.TenantProperties(cls.alt_tenant_id,
- *alt_creds)
+ creds = cls.credentials()
+ cls.primary_tenant = cls.TenantProperties(creds)
+ cls.alt_tenant = cls.TenantProperties(cls.alt_creds)
for tenant in [cls.primary_tenant, cls.alt_tenant]:
- cls.tenants[tenant.tenant_id] = tenant
+ cls.tenants[tenant.creds.tenant_id] = tenant
cls.floating_ip_access = not CONF.network.public_router_id
def cleanup_wrapper(self, resource):
@@ -175,14 +169,14 @@
def _create_tenant_security_groups(self, tenant):
access_sg = self._create_empty_security_group(
namestart='secgroup_access-',
- tenant_id=tenant.tenant_id
+ tenant_id=tenant.creds.tenant_id
)
self.addCleanup(self.cleanup_wrapper, access_sg)
# don't use default secgroup since it allows in-tenant traffic
def_sg = self._create_empty_security_group(
namestart='secgroup_general-',
- tenant_id=tenant.tenant_id
+ tenant_id=tenant.creds.tenant_id
)
self.addCleanup(self.cleanup_wrapper, def_sg)
tenant.security_groups.update(access=access_sg, default=def_sg)
@@ -239,7 +233,7 @@
],
'key_name': tenant.keypair.name,
'security_groups': security_groups,
- 'tenant_id': tenant.tenant_id
+ 'tenant_id': tenant.creds.tenant_id
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
self.addCleanup(self.cleanup_wrapper, server)
@@ -248,7 +242,7 @@
def _create_tenant_servers(self, tenant, num=1):
for i in range(num):
name = 'server-{tenant}-gen-{num}-'.format(
- tenant=tenant.tenant_name,
+ tenant=tenant.creds.tenant_name,
num=i
)
name = data_utils.rand_name(name)
@@ -262,8 +256,8 @@
workaround ip namespace
"""
secgroups = [sg.name for sg in tenant.security_groups.values()]
- name = 'server-{tenant}-access_point-'.format(tenant=tenant.tenant_name
- )
+ name = 'server-{tenant}-access_point-'.format(
+ tenant=tenant.creds.tenant_name)
name = data_utils.rand_name(name)
server = self._create_server(name, tenant,
security_groups=secgroups)
@@ -277,7 +271,7 @@
self.floating_ips.setdefault(server, floating_ip)
def _create_tenant_network(self, tenant):
- network, subnet, router = self._create_networks(tenant.tenant_id)
+ network, subnet, router = self._create_networks(tenant.creds.tenant_id)
for r in [network, router, subnet]:
self.addCleanup(self.cleanup_wrapper, r)
tenant.set_network(network, subnet, router)
@@ -300,7 +294,7 @@
tenant_id = tenant_or_id
else:
tenant = tenant_or_id
- tenant_id = tenant.tenant_id
+ tenant_id = tenant.creds.tenant_id
self._set_compute_context(tenant)
self._create_tenant_keypairs(tenant_id)
self._create_tenant_network(tenant)
@@ -335,8 +329,6 @@
if should_succeed:
msg = "Timed out waiting for %s to become reachable" % ip
else:
- # todo(yfried): remove this line when bug 1252620 is fixed
- return True
msg = "%s is reachable" % ip
try:
self.assertTrue(self._check_remote_connectivity(access_point, ip,
diff --git a/tempest/scenario/utils.py b/tempest/scenario/utils.py
index 4c7b6d7..e2adb34 100644
--- a/tempest/scenario/utils.py
+++ b/tempest/scenario/utils.py
@@ -21,6 +21,7 @@
import testscenarios
import testtools
+from tempest import auth
from tempest import clients
from tempest.common.utils import misc
from tempest import config
@@ -39,9 +40,8 @@
self.non_ssh_image_pattern = \
CONF.input_scenario.non_ssh_image_regex
# Setup clients
- ocm = clients.OfficialClientManager(CONF.identity.username,
- CONF.identity.password,
- CONF.identity.tenant_name)
+ ocm = clients.OfficialClientManager(
+ auth.get_default_credentials('user'))
self.client = ocm.compute_client
def ssh_user(self, image_id):
@@ -99,9 +99,8 @@
digit=string.digits)
def __init__(self):
- ocm = clients.OfficialClientManager(CONF.identity.username,
- CONF.identity.password,
- CONF.identity.tenant_name)
+ ocm = clients.OfficialClientManager(
+ auth.get_default_credentials('user', fill_in=False))
self.client = ocm.compute_client
self.image_pattern = CONF.input_scenario.image_regex
self.flavor_pattern = CONF.input_scenario.flavor_regex
diff --git a/tempest/services/compute/json/agents_client.py b/tempest/services/compute/json/agents_client.py
index 19821e7..98d8896 100644
--- a/tempest/services/compute/json/agents_client.py
+++ b/tempest/services/compute/json/agents_client.py
@@ -15,6 +15,7 @@
import json
import urllib
+from tempest.api_schema.compute import agents as common_schema
from tempest.api_schema.compute.v2 import agents as schema
from tempest.common import rest_client
from tempest import config
@@ -37,7 +38,9 @@
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- return resp, json.loads(body).get('agents')
+ body = json.loads(body)
+ self.validate_response(common_schema.list_agents, resp, body)
+ return resp, body['agents']
def create_agent(self, **kwargs):
"""Create an agent build."""
diff --git a/tempest/services/compute/json/aggregates_client.py b/tempest/services/compute/json/aggregates_client.py
index 54d1252..71d6f63 100644
--- a/tempest/services/compute/json/aggregates_client.py
+++ b/tempest/services/compute/json/aggregates_client.py
@@ -16,6 +16,7 @@
import json
from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v2 import aggregates as v2_schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -49,6 +50,7 @@
resp, body = self.post('os-aggregates', post_body)
body = json.loads(body)
+ self.validate_response(v2_schema.create_aggregate, resp, body)
return resp, body['aggregate']
def update_aggregate(self, aggregate_id, name, availability_zone=None):
@@ -66,7 +68,9 @@
def delete_aggregate(self, aggregate_id):
"""Deletes the given aggregate."""
- return self.delete("os-aggregates/%s" % str(aggregate_id))
+ resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+ self.validate_response(v2_schema.delete_aggregate, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
try:
@@ -84,6 +88,7 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(schema.aggregate_add_remove_host, resp, body)
return resp, body['aggregate']
def remove_host(self, aggregate_id, host):
@@ -95,6 +100,7 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(schema.aggregate_add_remove_host, resp, body)
return resp, body['aggregate']
def set_metadata(self, aggregate_id, meta):
diff --git a/tempest/services/compute/json/certificates_client.py b/tempest/services/compute/json/certificates_client.py
index c05e352..1d04628 100644
--- a/tempest/services/compute/json/certificates_client.py
+++ b/tempest/services/compute/json/certificates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v2 import certificates as v2schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +33,7 @@
url = "os-certificates/%s" % (id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_certificate, resp, body)
return resp, body['certificate']
def create_certificate(self):
@@ -38,4 +41,5 @@
url = "os-certificates"
resp, body = self.post(url, None)
body = json.loads(body)
+ self.validate_response(v2schema.create_certificate, resp, body)
return resp, body['certificate']
diff --git a/tempest/services/compute/json/flavors_client.py b/tempest/services/compute/json/flavors_client.py
index 65d2657..89cbe1d 100644
--- a/tempest/services/compute/json/flavors_client.py
+++ b/tempest/services/compute/json/flavors_client.py
@@ -56,6 +56,7 @@
def get_flavor_details(self, flavor_id):
resp, body = self.get("flavors/%s" % str(flavor_id))
body = json.loads(body)
+ self.validate_response(v2schema.create_get_flavor_details, resp, body)
return resp, body['flavor']
def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -79,11 +80,14 @@
resp, body = self.post('flavors', post_body)
body = json.loads(body)
+ self.validate_response(v2schema.create_get_flavor_details, resp, body)
return resp, body['flavor']
def delete_flavor(self, flavor_id):
"""Deletes the given flavor."""
- return self.delete("flavors/%s" % str(flavor_id))
+ resp, body = self.delete("flavors/{0}".format(flavor_id))
+ self.validate_response(v2schema.delete_flavor, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
# Did not use get_flavor_details(id) for verification as it gives
diff --git a/tempest/services/compute/json/hosts_client.py b/tempest/services/compute/json/hosts_client.py
index e148572..342f946 100644
--- a/tempest/services/compute/json/hosts_client.py
+++ b/tempest/services/compute/json/hosts_client.py
@@ -61,6 +61,7 @@
resp, body = self.put("os-hosts/%s" % str(hostname), request_body)
body = json.loads(body)
+ self.validate_response(v2_schema.update_host, resp, body)
return resp, body
def startup_host(self, hostname):
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index bd39a04..af7752a 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -70,6 +70,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_images_details, resp, body)
return resp, body['images']
def get_image(self, image_id):
diff --git a/tempest/services/compute/json/interfaces_client.py b/tempest/services/compute/json/interfaces_client.py
index 2f165a2..cdac8b7 100644
--- a/tempest/services/compute/json/interfaces_client.py
+++ b/tempest/services/compute/json/interfaces_client.py
@@ -17,6 +17,8 @@
import time
from tempest.api_schema.compute import interfaces as common_schema
+from tempest.api_schema.compute import servers as servers_schema
+from tempest.api_schema.compute.v2 import interfaces as schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -33,6 +35,7 @@
def list_interfaces(self, server):
resp, body = self.get('servers/%s/os-interface' % server)
body = json.loads(body)
+ self.validate_response(schema.list_interfaces, resp, body)
return resp, body['interfaceAttachments']
def create_interface(self, server, port_id=None, network_id=None,
@@ -92,6 +95,8 @@
})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
+ self.validate_response(servers_schema.server_actions_common_schema,
+ resp, body)
return resp, body
def remove_fixed_ip(self, server_id, ip_address):
@@ -103,4 +108,6 @@
})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
+ self.validate_response(servers_schema.server_actions_common_schema,
+ resp, body)
return resp, body
diff --git a/tempest/services/compute/json/migrations_client.py b/tempest/services/compute/json/migrations_client.py
index a13349e..beef5d2 100644
--- a/tempest/services/compute/json/migrations_client.py
+++ b/tempest/services/compute/json/migrations_client.py
@@ -15,6 +15,7 @@
import json
import urllib
+from tempest.api_schema.compute import migrations as schema
from tempest.common import rest_client
from tempest import config
@@ -36,4 +37,5 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_migrations, resp, body)
return resp, body['migrations']
diff --git a/tempest/services/compute/json/quotas_client.py b/tempest/services/compute/json/quotas_client.py
index 9bddf2c..7e828d8 100644
--- a/tempest/services/compute/json/quotas_client.py
+++ b/tempest/services/compute/json/quotas_client.py
@@ -110,6 +110,7 @@
post_body)
body = json.loads(body)
+ self.validate_response(schema.quota_set_update, resp, body)
return resp, body['quota_set']
def delete_quota_set(self, tenant_id):
diff --git a/tempest/services/compute/json/security_groups_client.py b/tempest/services/compute/json/security_groups_client.py
index 7411fb7..a86f3df 100644
--- a/tempest/services/compute/json/security_groups_client.py
+++ b/tempest/services/compute/json/security_groups_client.py
@@ -47,6 +47,7 @@
url = "os-security-groups/%s" % str(security_group_id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_security_group, resp, body)
return resp, body['security_group']
def create_security_group(self, name, description):
@@ -62,6 +63,7 @@
post_body = json.dumps({'security_group': post_body})
resp, body = self.post('os-security-groups', post_body)
body = json.loads(body)
+ self.validate_response(schema.get_security_group, resp, body)
return resp, body['security_group']
def update_security_group(self, security_group_id, name=None,
@@ -81,11 +83,15 @@
resp, body = self.put('os-security-groups/%s' % str(security_group_id),
post_body)
body = json.loads(body)
+ self.validate_response(schema.update_security_group, resp, body)
return resp, body['security_group']
def delete_security_group(self, security_group_id):
"""Deletes the provided Security Group."""
- return self.delete('os-security-groups/%s' % str(security_group_id))
+ resp, body = self.delete(
+ 'os-security-groups/%s' % str(security_group_id))
+ self.validate_response(schema.delete_security_group, resp, body)
+ return resp, body
def create_security_group_rule(self, parent_group_id, ip_proto, from_port,
to_port, **kwargs):
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 70a950a..36bb02f 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -126,6 +126,7 @@
post_body = json.dumps({'server': post_body})
resp, body = self.put("servers/%s" % str(server_id), post_body)
body = json.loads(body)
+ self.validate_response(schema.update_server, resp, body)
return resp, body['server']
def get_server(self, server_id):
@@ -149,6 +150,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(common_schema.list_servers, resp, body)
return resp, body
def list_servers_with_detail(self, params=None):
@@ -191,6 +193,7 @@
"""Lists all addresses for a server."""
resp, body = self.get("servers/%s/ips" % str(server_id))
body = json.loads(body)
+ self.validate_response(schema.list_addresses, resp, body)
return resp, body['addresses']
def list_addresses_by_network(self, server_id, network_id):
@@ -198,23 +201,28 @@
resp, body = self.get("servers/%s/ips/%s" %
(str(server_id), network_id))
body = json.loads(body)
+ self.validate_response(schema.list_addresses_by_network, resp, body)
return resp, body
def action(self, server_id, action_name, response_key,
- schema=None, **kwargs):
+ schema=common_schema.server_actions_common_schema, **kwargs):
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
if response_key is not None:
body = json.loads(body)
- # Check for Schema as 'None' because if we donot have any server
+ # Check for Schema as 'None' because if we do not have any server
# action schema implemented yet then they can pass 'None' to skip
# the validation.Once all server action has their schema
# implemented then, this check can be removed if every actions are
# supposed to validate their response.
+ # TODO(GMann): Remove the below 'if' check once all server actions
+ # schema are implemented.
if schema is not None:
self.validate_response(schema, resp, body)
body = body[response_key]
+ else:
+ self.validate_response(schema, resp, body)
return resp, body
def create_backup(self, server_id, backup_type, rotation, name):
@@ -242,8 +250,11 @@
Note that this does not actually change the instance server
password.
"""
- return self.delete("servers/%s/os-server-password" %
- str(server_id))
+ resp, body = self.delete("servers/%s/os-server-password" %
+ str(server_id))
+ self.validate_response(common_schema.server_actions_delete_password,
+ resp, body)
+ return resp, body
def reboot(self, server_id, reboot_type):
"""Reboots a server."""
@@ -255,7 +266,7 @@
if 'disk_config' in kwargs:
kwargs['OS-DCF:diskConfig'] = kwargs['disk_config']
del kwargs['disk_config']
- return self.action(server_id, 'rebuild', 'server', **kwargs)
+ return self.action(server_id, 'rebuild', 'server', None, **kwargs)
def resize(self, server_id, flavor_ref, **kwargs):
"""Changes the flavor of a server."""
@@ -267,7 +278,9 @@
def confirm_resize(self, server_id, **kwargs):
"""Confirms the flavor change for a server."""
- return self.action(server_id, 'confirmResize', None, **kwargs)
+ return self.action(server_id, 'confirmResize',
+ None, schema.server_actions_confirm_resize,
+ **kwargs)
def revert_resize(self, server_id, **kwargs):
"""Reverts a server back to its original flavor."""
@@ -276,6 +289,7 @@
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.list_server_metadata, resp, body)
return resp, body['metadata']
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -286,6 +300,7 @@
resp, body = self.put('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
+ self.validate_response(common_schema.set_server_metadata, resp, body)
return resp, body['metadata']
def update_server_metadata(self, server_id, meta):
@@ -293,11 +308,15 @@
resp, body = self.post('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
+ self.validate_response(common_schema.update_server_metadata,
+ resp, body)
return resp, body['metadata']
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['meta']
def set_server_metadata_item(self, server_id, key, meta):
@@ -305,11 +324,15 @@
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
post_body)
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['meta']
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
(str(server_id), key))
+ self.validate_response(common_schema.delete_server_metadata_item,
+ resp, body)
return resp, body
def stop(self, server_id, **kwargs):
@@ -359,6 +382,8 @@
req_body = json.dumps({'os-migrateLive': migrate_params})
resp, body = self.post("servers/%s/action" % str(server_id), req_body)
+ self.validate_response(common_schema.server_actions_common_schema,
+ resp, body)
return resp, body
def migrate_server(self, server_id, **kwargs):
@@ -407,7 +432,7 @@
def get_console_output(self, server_id, length):
return self.action(server_id, 'os-getConsoleOutput', 'output',
- length=length)
+ common_schema.get_console_output, length=length)
def list_virtual_interfaces(self, server_id):
"""
@@ -421,7 +446,7 @@
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server."""
- return self.action(server_id, 'rescue', None, **kwargs)
+ return self.action(server_id, 'rescue', 'adminPass', None, **kwargs)
def unrescue_server(self, server_id):
"""Unrescue the provided server."""
@@ -467,3 +492,36 @@
return self.action(server_id, "os-getVNCConsole",
"console", common_schema.get_vnc_console,
type=console_type)
+
+ def create_server_group(self, name, policies):
+ """
+ Create the server group
+ name : Name of the server-group
+ policies : List of the policies (affinity/anti-affinity)
+ """
+ post_body = {
+ 'name': name,
+ 'policies': policies,
+ }
+
+ post_body = json.dumps({'server_group': post_body})
+ resp, body = self.post('os-server-groups', post_body)
+
+ body = json.loads(body)
+ return resp, body['server_group']
+
+ def delete_server_group(self, server_group_id):
+ """Delete the given server-group."""
+ return self.delete("os-server-groups/%s" % str(server_group_id))
+
+ def list_server_groups(self):
+ """List the server-groups."""
+ resp, body = self.get("os-server-groups")
+ body = json.loads(body)
+ return resp, body['server_groups']
+
+ def get_server_group(self, server_group_id):
+ """Get the details of given server_group."""
+ resp, body = self.get("os-server-groups/%s" % str(server_group_id))
+ body = json.loads(body)
+ return resp, body['server_group']
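
The new server-group helpers wrap the os-server-groups extension with the usual Nova request/response envelope. A standalone sketch of the payloads involved (the response contents below are illustrative, not taken from a real deployment):

    import json

    # Request body as assembled by create_server_group().
    post_body = json.dumps({'server_group': {
        'name': 'sg-example',
        'policies': ['anti-affinity'],
    }})

    # Illustrative response; the client unwraps body['server_group'].
    response = json.loads('{"server_group": {"id": "fake-id",'
                          ' "name": "sg-example",'
                          ' "policies": ["anti-affinity"],'
                          ' "members": [], "metadata": {}}}')
    assert response['server_group']['policies'] == ['anti-affinity']
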
diff --git a/tempest/services/compute/json/tenant_usages_client.py b/tempest/services/compute/json/tenant_usages_client.py
index f3a67dd..f8adae7 100644
--- a/tempest/services/compute/json/tenant_usages_client.py
+++ b/tempest/services/compute/json/tenant_usages_client.py
@@ -16,6 +16,7 @@
import json
import urllib
+from tempest.api_schema.compute.v2 import tenant_usages as schema
from tempest.common import rest_client
from tempest import config
@@ -35,6 +36,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.list_tenant, resp, body)
return resp, body['tenant_usages'][0]
def get_tenant_usage(self, tenant_id, params=None):
@@ -44,4 +46,5 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_tenant, resp, body)
return resp, body['tenant_usage']
diff --git a/tempest/services/compute/v3/json/agents_client.py b/tempest/services/compute/v3/json/agents_client.py
index e1c286c..48be54c 100644
--- a/tempest/services/compute/v3/json/agents_client.py
+++ b/tempest/services/compute/v3/json/agents_client.py
@@ -15,6 +15,7 @@
import json
import urllib
+from tempest.api_schema.compute import agents as common_schema
from tempest.api_schema.compute.v3 import agents as schema
from tempest.common import rest_client
from tempest import config
@@ -34,7 +35,9 @@
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- return resp, self._parse_resp(body)
+ body = json.loads(body)
+ self.validate_response(common_schema.list_agents, resp, body)
+ return resp, body['agents']
def create_agent(self, **kwargs):
"""Create an agent build."""
diff --git a/tempest/services/compute/v3/json/aggregates_client.py b/tempest/services/compute/v3/json/aggregates_client.py
index 0fc6af9..d9b7930 100644
--- a/tempest/services/compute/v3/json/aggregates_client.py
+++ b/tempest/services/compute/v3/json/aggregates_client.py
@@ -16,6 +16,7 @@
import json
from tempest.api_schema.compute import aggregates as schema
+from tempest.api_schema.compute.v3 import aggregates as v3_schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -49,6 +50,7 @@
resp, body = self.post('os-aggregates', post_body)
body = json.loads(body)
+ self.validate_response(v3_schema.create_aggregate, resp, body)
return resp, body['aggregate']
def update_aggregate(self, aggregate_id, name, availability_zone=None):
@@ -66,7 +68,9 @@
def delete_aggregate(self, aggregate_id):
"""Deletes the given aggregate."""
- return self.delete("os-aggregates/%s" % str(aggregate_id))
+ resp, body = self.delete("os-aggregates/%s" % str(aggregate_id))
+ self.validate_response(v3_schema.delete_aggregate, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
try:
@@ -84,6 +88,7 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(v3_schema.aggregate_add_remove_host, resp, body)
return resp, body['aggregate']
def remove_host(self, aggregate_id, host):
@@ -95,6 +100,7 @@
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
post_body)
body = json.loads(body)
+ self.validate_response(v3_schema.aggregate_add_remove_host, resp, body)
return resp, body['aggregate']
def set_metadata(self, aggregate_id, meta):
diff --git a/tempest/services/compute/v3/json/certificates_client.py b/tempest/services/compute/v3/json/certificates_client.py
index f8beeb9..be9b3c3 100644
--- a/tempest/services/compute/v3/json/certificates_client.py
+++ b/tempest/services/compute/v3/json/certificates_client.py
@@ -15,6 +15,8 @@
import json
+from tempest.api_schema.compute import certificates as schema
+from tempest.api_schema.compute.v3 import certificates as v3schema
from tempest.common import rest_client
from tempest import config
@@ -31,6 +33,7 @@
url = "os-certificates/%s" % (id)
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(schema.get_certificate, resp, body)
return resp, body['certificate']
def create_certificate(self):
@@ -38,4 +41,5 @@
url = "os-certificates"
resp, body = self.post(url, None)
body = json.loads(body)
+ self.validate_response(v3schema.create_certificate, resp, body)
return resp, body['certificate']
diff --git a/tempest/services/compute/v3/json/flavors_client.py b/tempest/services/compute/v3/json/flavors_client.py
index 602fee2..5afab5a 100644
--- a/tempest/services/compute/v3/json/flavors_client.py
+++ b/tempest/services/compute/v3/json/flavors_client.py
@@ -56,6 +56,7 @@
def get_flavor_details(self, flavor_id):
resp, body = self.get("flavors/%s" % str(flavor_id))
body = json.loads(body)
+ self.validate_response(v3schema.get_flavor_details, resp, body)
return resp, body['flavor']
def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
@@ -79,11 +80,14 @@
resp, body = self.post('flavors', post_body)
body = json.loads(body)
+ self.validate_response(v3schema.create_flavor_details, resp, body)
return resp, body['flavor']
def delete_flavor(self, flavor_id):
"""Deletes the given flavor."""
- return self.delete("flavors/%s" % str(flavor_id))
+ resp, body = self.delete("flavors/{0}".format(flavor_id))
+ self.validate_response(v3schema.delete_flavor, resp, body)
+ return resp, body
def is_resource_deleted(self, id):
# Did not use get_flavor_details(id) for verification as it gives
diff --git a/tempest/services/compute/v3/json/hosts_client.py b/tempest/services/compute/v3/json/hosts_client.py
index 24d43d0..d2eb43d 100644
--- a/tempest/services/compute/v3/json/hosts_client.py
+++ b/tempest/services/compute/v3/json/hosts_client.py
@@ -61,6 +61,7 @@
resp, body = self.put("os-hosts/%s" % str(hostname), request_body)
body = json.loads(body)
+ self.validate_response(v3_schema.update_host, resp, body)
return resp, body
def startup_host(self, hostname):
diff --git a/tempest/services/compute/v3/json/interfaces_client.py b/tempest/services/compute/v3/json/interfaces_client.py
index 25c8db7..e66ccaa 100644
--- a/tempest/services/compute/v3/json/interfaces_client.py
+++ b/tempest/services/compute/v3/json/interfaces_client.py
@@ -17,6 +17,8 @@
import time
from tempest.api_schema.compute import interfaces as common_schema
+from tempest.api_schema.compute import servers as servers_schema
+from tempest.api_schema.compute.v3 import interfaces as schema
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
@@ -33,6 +35,7 @@
def list_interfaces(self, server):
resp, body = self.get('servers/%s/os-attach-interfaces' % server)
body = json.loads(body)
+ self.validate_response(schema.list_interfaces, resp, body)
return resp, body['interface_attachments']
def create_interface(self, server, port_id=None, network_id=None,
@@ -93,6 +96,8 @@
})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
+ self.validate_response(servers_schema.server_actions_common_schema,
+ resp, body)
return resp, body
def remove_fixed_ip(self, server_id, ip_address):
@@ -104,4 +109,6 @@
})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
+ self.validate_response(servers_schema.server_actions_common_schema,
+ resp, body)
return resp, body
diff --git a/tempest/services/compute/v3/json/migration_client.py b/tempest/services/compute/v3/json/migration_client.py
index efd39b7..c821567 100644
--- a/tempest/services/compute/v3/json/migration_client.py
+++ b/tempest/services/compute/v3/json/migration_client.py
@@ -12,8 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
import urllib
+from tempest.api_schema.compute import migrations as schema
from tempest.common import rest_client
from tempest import config
@@ -34,4 +36,6 @@
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
- return resp, self._parse_resp(body)
+ body = json.loads(body)
+ self.validate_response(schema.list_migrations, resp, body)
+ return resp, body['migrations']
diff --git a/tempest/services/compute/v3/json/servers_client.py b/tempest/services/compute/v3/json/servers_client.py
index bbffc13..eed85c7 100644
--- a/tempest/services/compute/v3/json/servers_client.py
+++ b/tempest/services/compute/v3/json/servers_client.py
@@ -126,6 +126,7 @@
post_body = json.dumps({'server': post_body})
resp, body = self.put("servers/%s" % str(server_id), post_body)
body = json.loads(body)
+ self.validate_response(schema.update_server, resp, body)
return resp, body['server']
def get_server(self, server_id):
@@ -149,6 +150,7 @@
resp, body = self.get(url)
body = json.loads(body)
+ self.validate_response(common_schema.list_servers, resp, body)
return resp, body
def list_servers_with_detail(self, params=None):
@@ -191,6 +193,7 @@
"""Lists all addresses for a server."""
resp, body = self.get("servers/%s/ips" % str(server_id))
body = json.loads(body)
+ self.validate_response(schema.list_addresses, resp, body)
return resp, body['addresses']
def list_addresses_by_network(self, server_id, network_id):
@@ -198,14 +201,28 @@
resp, body = self.get("servers/%s/ips/%s" %
(str(server_id), network_id))
body = json.loads(body)
+ self.validate_response(schema.list_addresses_by_network, resp, body)
return resp, body
- def action(self, server_id, action_name, response_key, **kwargs):
+ def action(self, server_id, action_name, response_key,
+ schema=common_schema.server_actions_common_schema, **kwargs):
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body)
if response_key is not None:
- body = json.loads(body)[response_key]
+ body = json.loads(body)
+ # Check for Schema as 'None' because if we do not have any server
+ # action schema implemented yet then they can pass 'None' to skip
+ # the validation. Once all server actions have their schemas
+ # implemented, this check can be removed if every action is
+ # supposed to validate its response.
+ # TODO(GMann): Remove the below 'if' check once all server actions
+ # schema are implemented.
+ if schema is not None:
+ self.validate_response(schema, resp, body)
+ body = body[response_key]
+ else:
+ self.validate_response(schema, resp, body)
return resp, body
def create_backup(self, server_id, backup_type, rotation, name):
@@ -217,7 +234,8 @@
def change_password(self, server_id, admin_password):
"""Changes the root password for the server."""
- return self.action(server_id, 'change_password', None,
+ return self.action(server_id, 'change_password',
+ None, schema.server_actions_change_password,
admin_password=admin_password)
def get_password(self, server_id):
@@ -233,8 +251,11 @@
Note that this does not actually change the instance server
password.
"""
- return self.delete("servers/%s/os-server-password" %
- str(server_id))
+ resp, body = self.delete("servers/%s/os-server-password" %
+ str(server_id))
+ self.validate_response(common_schema.server_actions_delete_password,
+ resp, body)
+ return resp, body
def reboot(self, server_id, reboot_type):
"""Reboots a server."""
@@ -246,7 +267,7 @@
if 'disk_config' in kwargs:
kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
del kwargs['disk_config']
- return self.action(server_id, 'rebuild', 'server', **kwargs)
+ return self.action(server_id, 'rebuild', 'server', None, **kwargs)
def resize(self, server_id, flavor_ref, **kwargs):
"""Changes the flavor of a server."""
@@ -284,6 +305,7 @@
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
+ self.validate_response(common_schema.list_server_metadata, resp, body)
return resp, body['metadata']
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
@@ -294,6 +316,7 @@
resp, body = self.put('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
+ self.validate_response(common_schema.set_server_metadata, resp, body)
return resp, body['metadata']
def update_server_metadata(self, server_id, meta):
@@ -301,11 +324,14 @@
resp, body = self.post('servers/%s/metadata' % str(server_id),
post_body)
body = json.loads(body)
+ self.validate_response(schema.update_server_metadata, resp, body)
return resp, body['metadata']
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['metadata']
def set_server_metadata_item(self, server_id, key, meta):
@@ -313,11 +339,15 @@
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
post_body)
body = json.loads(body)
+ self.validate_response(schema.set_get_server_metadata_item,
+ resp, body)
return resp, body['metadata']
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
(str(server_id), key))
+ self.validate_response(common_schema.delete_server_metadata_item,
+ resp, body)
return resp, body
def stop(self, server_id, **kwargs):
@@ -353,6 +383,8 @@
resp, body = self.post("servers/%s/action" % str(server_id),
req_body)
+ self.validate_response(common_schema.server_actions_common_schema,
+ resp, body)
return resp, body
def migrate_server(self, server_id, **kwargs):
@@ -401,11 +433,12 @@
def get_console_output(self, server_id, length):
return self.action(server_id, 'get_console_output', 'output',
- length=length)
+ common_schema.get_console_output, length=length)
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server."""
- return self.action(server_id, 'rescue', None, **kwargs)
+ return self.action(server_id, 'rescue', 'admin_password',
+ None, **kwargs)
def unrescue_server(self, server_id):
"""Unrescue the provided server."""
@@ -459,3 +492,13 @@
def inject_network_info(self, server_id, **kwargs):
"""Inject the Network Info into server"""
return self.action(server_id, 'inject_network_info', None, **kwargs)
+
+ def get_spice_console(self, server_id, console_type):
+ """Get URL of Spice console."""
+ return self.action(server_id, "get_spice_console"
+ "console", None, type=console_type)
+
+ def get_rdp_console(self, server_id, console_type):
+ """Get URL of RDP console."""
+ return self.action(server_id, "get_rdp_console"
+ "console", None, type=console_type)
diff --git a/tempest/services/data_processing/v1_1/client.py b/tempest/services/data_processing/v1_1/client.py
index c7b5f93..73e67c3 100644
--- a/tempest/services/data_processing/v1_1/client.py
+++ b/tempest/services/data_processing/v1_1/client.py
@@ -1,17 +1,16 @@
# Copyright (c) 2013 Mirantis Inc.
#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
import json
@@ -128,3 +127,62 @@
uri = 'cluster-templates/%s' % tmpl_id
return self.delete(uri)
+
+ def list_data_sources(self):
+ """List all data sources for a user."""
+
+ uri = 'data-sources'
+ return self._request_and_parse(self.get, uri, 'data_sources')
+
+ def get_data_source(self, source_id):
+ """Returns the details of a single data source."""
+
+ uri = 'data-sources/%s' % source_id
+ return self._request_and_parse(self.get, uri, 'data_source')
+
+ def create_data_source(self, name, data_source_type, url, **kwargs):
+ """Creates data source with specified params.
+
+ It supports passing additional params using kwargs and returns created
+ object.
+ """
+ uri = 'data-sources'
+ body = kwargs.copy()
+ body.update({
+ 'name': name,
+ 'type': data_source_type,
+ 'url': url
+ })
+ return self._request_and_parse(self.post, uri, 'data_source',
+ body=json.dumps(body))
+
+ def delete_data_source(self, source_id):
+ """Deletes the specified data source by id."""
+
+ uri = 'data-sources/%s' % source_id
+ return self.delete(uri)
+
+ def list_job_binary_internals(self):
+ """List all job binary internals for a user."""
+
+ uri = 'job-binary-internals'
+ return self._request_and_parse(self.get, uri, 'binaries')
+
+ def get_job_binary_internal(self, job_binary_id):
+ """Returns the details of a single job binary internal."""
+
+ uri = 'job-binary-internals/%s' % job_binary_id
+ return self._request_and_parse(self.get, uri, 'job_binary_internal')
+
+ def create_job_binary_internal(self, name, data):
+ """Creates job binary internal with specified params."""
+
+ uri = 'job-binary-internals/%s' % name
+ return self._request_and_parse(self.put, uri, 'job_binary_internal',
+ data)
+
+ def delete_job_binary_internal(self, job_binary_id):
+ """Deletes the specified job binary internal by id."""
+
+ uri = 'job-binary-internals/%s' % job_binary_id
+ return self.delete(uri)
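
The Sahara data-source helper builds its body from the required name/type/url fields plus any extra kwargs. A standalone sketch of the JSON that create_data_source() would send for a Swift-backed source (the container, credentials and description are placeholders):

    import json

    kwargs = {'description': 'input data',
              'credentials': {'user': 'demo', 'password': 'secret'}}
    body = kwargs.copy()
    body.update({
        'name': 'input-source',
        'type': 'swift',
        'url': 'swift://demo-container.sahara/input',
    })
    print(json.dumps(body, sort_keys=True))
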
diff --git a/tempest/services/database/json/versions_client.py b/tempest/services/database/json/versions_client.py
new file mode 100644
index 0000000..0269c43
--- /dev/null
+++ b/tempest/services/database/json/versions_client.py
@@ -0,0 +1,38 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class DatabaseVersionsClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(DatabaseVersionsClientJSON, self).__init__(auth_provider)
+ self.skip_path()
+ self.service = CONF.database.catalog_type
+
+ def list_db_versions(self, params=None):
+ """List all versions."""
+ url = ''
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ return resp, self._parse_resp(body)
diff --git a/tempest/services/identity/json/identity_client.py b/tempest/services/identity/json/identity_client.py
index 55239f7..479a289 100644
--- a/tempest/services/identity/json/identity_client.py
+++ b/tempest/services/identity/json/identity_client.py
@@ -227,6 +227,16 @@
url = '/OS-KSADM/services/%s' % service_id
return self.delete(url)
+ def update_user_password(self, user_id, new_pass):
+ """Update User Password."""
+ put_body = {
+ 'password': new_pass,
+ 'id': user_id
+ }
+ put_body = json.dumps({'user': put_body})
+ resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
+ return resp, self._parse_resp(body)
+
class TokenClientJSON(IdentityClientJSON):
diff --git a/tempest/services/identity/v3/json/region_client.py b/tempest/services/identity/v3/json/region_client.py
new file mode 100644
index 0000000..f95d00f
--- /dev/null
+++ b/tempest/services/identity/v3/json/region_client.py
@@ -0,0 +1,80 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class RegionClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(RegionClientJSON, self).__init__(auth_provider)
+ self.service = CONF.identity.catalog_type
+ self.endpoint_url = 'adminURL'
+ self.api_version = "v3"
+
+ def create_region(self, description, **kwargs):
+ """Create region."""
+ req_body = {
+ 'description': description,
+ }
+ if kwargs.get('parent_region_id'):
+ req_body['parent_region_id'] = kwargs.get('parent_region_id')
+ req_body = json.dumps({'region': req_body})
+ if kwargs.get('unique_region_id'):
+ resp, body = self.put(
+ 'regions/%s' % kwargs.get('unique_region_id'), req_body)
+ else:
+ resp, body = self.post('regions', req_body)
+ body = json.loads(body)
+ return resp, body['region']
+
+ def update_region(self, region_id, **kwargs):
+ """Updates a region."""
+ post_body = {}
+ if 'description' in kwargs:
+ post_body['description'] = kwargs.get('description')
+ if 'parent_region_id' in kwargs:
+ post_body['parent_region_id'] = kwargs.get('parent_region_id')
+ post_body = json.dumps({'region': post_body})
+ resp, body = self.patch('regions/%s' % region_id, post_body)
+ body = json.loads(body)
+ return resp, body['region']
+
+ def get_region(self, region_id):
+ """Get region."""
+ url = 'regions/%s' % region_id
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['region']
+
+ def list_regions(self, params=None):
+ """List regions."""
+ url = 'regions'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['regions']
+
+ def delete_region(self, region_id):
+ """Delete region."""
+ resp, body = self.delete('regions/%s' % region_id)
+ return resp, body
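
create_region() above supports two Keystone V3 flows: a PUT to regions/<id> when the caller supplies unique_region_id (so the region keeps a caller-chosen ID), and a plain POST otherwise. A hypothetical helper mirroring that branching, with made-up values:

    import json

    def build_region_request(description, parent_region_id=None,
                             unique_region_id=None):
        # Caller-supplied id -> idempotent PUT to a specific resource;
        # otherwise POST to the collection and let Keystone pick the id.
        region = {'description': description}
        if parent_region_id:
            region['parent_region_id'] = parent_region_id
        body = json.dumps({'region': region})
        if unique_region_id:
            return 'PUT', 'regions/%s' % unique_region_id, body
        return 'POST', 'regions', body

    assert build_region_request('example', unique_region_id='r1')[0] == 'PUT'
    assert build_region_request('example')[0] == 'POST'
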
diff --git a/tempest/services/identity/v3/xml/region_client.py b/tempest/services/identity/v3/xml/region_client.py
new file mode 100644
index 0000000..9f9161d
--- /dev/null
+++ b/tempest/services/identity/v3/xml/region_client.py
@@ -0,0 +1,120 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from lxml import etree
+
+from tempest.common import http
+from tempest.common import rest_client
+from tempest.common import xml_utils as common
+from tempest import config
+
+CONF = config.CONF
+
+XMLNS = "http://docs.openstack.org/identity/api/v3"
+
+
+class RegionClientXML(rest_client.RestClient):
+ TYPE = "xml"
+
+ def __init__(self, auth_provider):
+ super(RegionClientXML, self).__init__(auth_provider)
+ self.service = CONF.identity.catalog_type
+ self.region_url = 'adminURL'
+ self.api_version = "v3"
+
+ def _parse_array(self, node):
+ array = []
+ for child in node.getchildren():
+ tag_list = child.tag.split('}', 1)
+ if tag_list[1] == "region":
+ array.append(common.xml_to_json(child))
+ return array
+
+ def _parse_body(self, body):
+ json = common.xml_to_json(body)
+ return json
+
+ def request(self, method, url, extra_headers=False, headers=None,
+ body=None, wait=None):
+ """Overriding the existing HTTP request in super class RestClient."""
+ if extra_headers:
+ try:
+ headers.update(self.get_headers())
+ except (ValueError, TypeError):
+ headers = self.get_headers()
+ dscv = CONF.identity.disable_ssl_certificate_validation
+ self.http_obj = http.ClosingHttp(
+ disable_ssl_certificate_validation=dscv)
+ return super(RegionClientXML, self).request(method, url,
+ extra_headers,
+ headers=headers,
+ body=body)
+
+ def create_region(self, description, **kwargs):
+ """Create region."""
+ create_region = common.Element("region",
+ xmlns=XMLNS,
+ description=description)
+ if 'parent_region_id' in kwargs:
+ create_region.append(common.Element(
+ 'parent_region_id', kwargs.get('parent_region_id')))
+ if 'unique_region_id' in kwargs:
+ resp, body = self.put(
+ 'regions/%s' % kwargs.get('unique_region_id'),
+ str(common.Document(create_region)))
+ else:
+ resp, body = self.post('regions',
+ str(common.Document(create_region)))
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def update_region(self, region_id, **kwargs):
+ """Updates an region with given parameters.
+ """
+ description = kwargs.get('description', None)
+ update_region = common.Element("region",
+ xmlns=XMLNS,
+ description=description)
+ if 'parent_region_id' in kwargs:
+ update_region.append(common.Element('parent_region_id',
+ kwargs.get('parent_region_id')))
+
+ resp, body = self.patch('regions/%s' % str(region_id),
+ str(common.Document(update_region)))
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def get_region(self, region_id):
+ """Get Region."""
+ url = 'regions/%s' % region_id
+ resp, body = self.get(url)
+ body = self._parse_body(etree.fromstring(body))
+ return resp, body
+
+ def list_regions(self, params=None):
+ """Get the list of regions."""
+ url = 'regions'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+ resp, body = self.get(url)
+ body = self._parse_array(etree.fromstring(body))
+ return resp, body
+
+ def delete_region(self, region_id):
+ """Delete region."""
+ resp, body = self.delete('regions/%s' % region_id)
+ return resp, body
diff --git a/tempest/services/identity/xml/identity_client.py b/tempest/services/identity/xml/identity_client.py
index c48bc90..b213c1a 100644
--- a/tempest/services/identity/xml/identity_client.py
+++ b/tempest/services/identity/xml/identity_client.py
@@ -118,6 +118,15 @@
str(xml.Document(create_service)))
return resp, self._parse_resp(body)
+ def update_user_password(self, user_id, new_pass):
+ """Update User Password."""
+ put_body = xml.Element("user",
+ id=user_id,
+ password=new_pass)
+ resp, body = self.put('users/%s/OS-KSADM/password' % user_id,
+ str(xml.Document(put_body)))
+ return resp, self._parse_resp(body)
+
class TokenClientXML(identity_client.TokenClientJSON):
TYPE = "xml"
diff --git a/tempest/services/image/v2/json/image_client.py b/tempest/services/image/v2/json/image_client.py
index b3014fc..201869e 100644
--- a/tempest/services/image/v2/json/image_client.py
+++ b/tempest/services/image/v2/json/image_client.py
@@ -70,13 +70,12 @@
"disk_format": disk_format,
}
- for option in ['visibility']:
- if option in kwargs:
- value = kwargs.get(option)
- if isinstance(value, dict) or isinstance(value, tuple):
- params.update(value)
- else:
- params[option] = value
+ for option in kwargs:
+ value = kwargs.get(option)
+ if isinstance(value, dict) or isinstance(value, tuple):
+ params.update(value)
+ else:
+ params[option] = value
data = json.dumps(params)
self._validate_schema(data)
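
The loop above now folds every keyword argument into the create request, merging dict or tuple values straight into the parameter set instead of only honouring 'visibility'. A standalone sketch of that merge behaviour (the argument names are illustrative):

    params = {'name': 'cirros', 'container_format': 'bare',
              'disk_format': 'qcow2'}
    kwargs = {'visibility': 'private',
              # dict values are flattened into params rather than nested
              'extra': {'os_distro': 'cirros'}}

    for option in kwargs:
        value = kwargs.get(option)
        if isinstance(value, (dict, tuple)):
            params.update(value)
        else:
            params[option] = value

    assert params['visibility'] == 'private'
    assert params['os_distro'] == 'cirros'
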
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index f9dd8ef..8e53b8d 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -165,21 +165,6 @@
resp, body = self.delete(uri)
return resp, body
- def create_vpnservice(self, subnet_id, router_id, **kwargs):
- post_body = {
- "vpnservice": {
- "subnet_id": subnet_id,
- "router_id": router_id
- }
- }
- for key, val in kwargs.items():
- post_body['vpnservice'][key] = val
- body = json.dumps(post_body)
- uri = '%s/vpn/vpnservices' % (self.uri_prefix)
- resp, body = self.post(uri, body)
- body = json.loads(body)
- return resp, body
-
def list_router_interfaces(self, uuid):
uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
resp, body = self.get(uri)
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 50a1954..a7a6b2c 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -24,7 +24,7 @@
# list of plurals used for xml serialization
PLURALS = ['dns_nameservers', 'host_routes', 'allocation_pools',
'fixed_ips', 'extensions', 'extra_dhcp_opts', 'pools',
- 'health_monitors', 'vips', 'members']
+ 'health_monitors', 'vips', 'members', 'allowed_address_pairs']
def get_rest_client(self, auth_provider):
rc = rest_client.RestClient(auth_provider)
@@ -257,38 +257,6 @@
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
- def create_vpnservice(self, subnet_id, router_id, **kwargs):
- uri = '%s/vpn/vpnservices' % (self.uri_prefix)
- vpnservice = common.Element("vpnservice")
- p1 = common.Element("subnet_id", subnet_id)
- p2 = common.Element("router_id", router_id)
- vpnservice.append(p1)
- vpnservice.append(p2)
- common.deep_dict_to_xml(vpnservice, kwargs)
- resp, body = self.post(uri, str(common.Document(vpnservice)))
- body = _root_tag_fetcher_and_xml_to_json_parse(body)
- return resp, body
-
- def create_ikepolicy(self, name, **kwargs):
- uri = '%s/vpn/ikepolicies' % (self.uri_prefix)
- ikepolicy = common.Element("ikepolicy")
- p1 = common.Element("name", name)
- ikepolicy.append(p1)
- common.deep_dict_to_xml(ikepolicy, kwargs)
- resp, body = self.post(uri, str(common.Document(ikepolicy)))
- body = _root_tag_fetcher_and_xml_to_json_parse(body)
- return resp, body
-
- def create_ipsecpolicy(self, name, **kwargs):
- uri = '%s/vpn/ipsecpolicies' % (self.uri_prefix)
- ipsecpolicy = common.Element("ipsecpolicy")
- p1 = common.Element("name", name)
- ipsecpolicy.append(p1)
- common.deep_dict_to_xml(ipsecpolicy, kwargs)
- resp, body = self.post(uri, str(common.Document(ipsecpolicy)))
- body = _root_tag_fetcher_and_xml_to_json_parse(body)
- return resp, body
-
def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/orchestration/json/orchestration_client.py b/tempest/services/orchestration/json/orchestration_client.py
index 2311bdd..c459f28 100644
--- a/tempest/services/orchestration/json/orchestration_client.py
+++ b/tempest/services/orchestration/json/orchestration_client.py
@@ -45,28 +45,32 @@
return resp, body['stacks']
def create_stack(self, name, disable_rollback=True, parameters={},
- timeout_mins=60, template=None, template_url=None):
+ timeout_mins=60, template=None, template_url=None,
+ environment=None, files=None):
headers, body = self._prepare_update_create(
name,
disable_rollback,
parameters,
timeout_mins,
template,
- template_url)
+ template_url,
+ environment,
+ files)
uri = 'stacks'
resp, body = self.post(uri, headers=headers, body=body)
return resp, body
def update_stack(self, stack_identifier, name, disable_rollback=True,
parameters={}, timeout_mins=60, template=None,
- template_url=None):
+ template_url=None, environment=None, files=None):
headers, body = self._prepare_update_create(
name,
disable_rollback,
parameters,
timeout_mins,
template,
- template_url)
+ template_url,
+ environment,
+ files)
uri = "stacks/%s" % stack_identifier
resp, body = self.put(uri, headers=headers, body=body)
@@ -74,13 +78,16 @@
def _prepare_update_create(self, name, disable_rollback=True,
parameters={}, timeout_mins=60,
- template=None, template_url=None):
+ template=None, template_url=None,
+ environment=None, files=None):
post_body = {
"stack_name": name,
"disable_rollback": disable_rollback,
"parameters": parameters,
"timeout_mins": timeout_mins,
- "template": "HeatTemplateFormatVersion: '2012-12-12'\n"
+ "template": "HeatTemplateFormatVersion: '2012-12-12'\n",
+ "environment": environment,
+ "files": files
}
if template:
post_body['template'] = template
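
With environment and files threaded through, a stack create request can carry an environment dict and a map of referenced template files alongside the template itself. A sketch of the resulting POST body (the template, environment and file contents are toy examples):

    import json

    template = ("heat_template_version: 2013-05-23\n"
                "resources: {}\n")
    environment = {'parameters': {'flavor': 'm1.tiny'}}
    files = {'nested.yaml': 'heat_template_version: 2013-05-23\n'}

    post_body = json.dumps({
        'stack_name': 'example-stack',
        'disable_rollback': True,
        'parameters': {},
        'timeout_mins': 60,
        'template': template,
        'environment': environment,
        'files': files,
    })
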
diff --git a/tempest/services/queuing/json/queuing_client.py b/tempest/services/queuing/json/queuing_client.py
index 4a0c495..e5978f5 100644
--- a/tempest/services/queuing/json/queuing_client.py
+++ b/tempest/services/queuing/json/queuing_client.py
@@ -15,6 +15,7 @@
import json
+from tempest.api_schema.queuing.v1 import queues as queues_schema
from tempest.common import rest_client
from tempest import config
@@ -33,6 +34,7 @@
uri = '{0}/queues'.format(self.uri_prefix)
resp, body = self.get(uri)
body = json.loads(body)
+ self.validate_response(queues_schema.list_queues, resp, body)
return resp, body
def create_queue(self, queue_name):
@@ -43,16 +45,32 @@
def get_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri)
- body = json.loads(body)
return resp, body
def head_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.head(uri)
- body = json.loads(body)
return resp, body
def delete_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp = self.delete(uri)
return resp
+
+ def get_queue_stats(self, queue_name):
+ uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
+ resp, body = self.get(uri)
+ body = json.loads(body)
+ self.validate_response(queues_schema.queue_stats, resp, body)
+ return resp, body
+
+ def get_queue_metadata(self, queue_name):
+ uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
+ resp, body = self.get(uri)
+ body = json.loads(body)
+ return resp, body
+
+ def set_queue_metadata(self, queue_name, rbody):
+ uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
+ resp, body = self.put(uri, body=json.dumps(rbody))
+ return resp, body
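
get_queue_stats() validates its body against queues_schema.queue_stats before returning it. A small sketch of the kind of stats payload involved (the exact fields are an assumption about the queuing API, shown here only to illustrate the shape being checked):

    import json

    stats_uri = '{0}/queues/{1}/stats'.format('/v1', 'example-queue')

    # Assumed payload shape: message counters grouped under 'messages'.
    stats = json.loads('{"messages": {"free": 2, "claimed": 1, "total": 3}}')
    assert set(stats['messages']) == {'free', 'claimed', 'total'}
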
diff --git a/tempest/services/telemetry/telemetry_client_base.py b/tempest/services/telemetry/telemetry_client_base.py
index 610f07b..a073f54 100644
--- a/tempest/services/telemetry/telemetry_client_base.py
+++ b/tempest/services/telemetry/telemetry_client_base.py
@@ -73,7 +73,10 @@
return resp, body
def put(self, uri, body):
- return self.rest_client.put(uri, body)
+ body = self.serialize(body)
+ resp, body = self.rest_client.put(uri, body)
+ body = self.deserialize(body)
+ return resp, body
def get(self, uri):
resp, body = self.rest_client.get(uri)
@@ -133,3 +136,15 @@
def create_alarm(self, **kwargs):
uri = "%s/alarms" % self.uri_prefix
return self.post(uri, kwargs)
+
+ def update_alarm(self, alarm_id, **kwargs):
+ uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id)
+ return self.put(uri, kwargs)
+
+ def alarm_get_state(self, alarm_id):
+ uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+ return self.get(uri)
+
+ def alarm_set_state(self, alarm_id, state):
+ uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id)
+ return self.put(uri, state)
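
Because put() now serialises its body, alarm_set_state() can send the bare state string that the alarm state endpoint expects, and alarm_get_state() gets the deserialised string back. A sketch of the JSON involved (the alarm id is an example; Ceilometer's states include 'ok', 'alarm' and 'insufficient data'):

    import json

    uri = '%s/alarms/%s/state' % ('/v2', 'example-alarm-id')

    # The PUT body is just the JSON-encoded state string...
    request_body = json.dumps('alarm')
    assert request_body == '"alarm"'

    # ...and the GET side decodes back to the same bare string.
    assert json.loads(request_body) == 'alarm'
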
diff --git a/tempest/services/volume/json/admin/volume_quotas_client.py b/tempest/services/volume/json/admin/volume_quotas_client.py
index ea9c92e..961c7da 100644
--- a/tempest/services/volume/json/admin/volume_quotas_client.py
+++ b/tempest/services/volume/json/admin/volume_quotas_client.py
@@ -77,3 +77,7 @@
post_body = jsonutils.dumps({'quota_set': post_body})
resp, body = self.put('os-quota-sets/%s' % tenant_id, post_body)
return resp, self._parse_resp(body)
+
+ def delete_quota_set(self, tenant_id):
+ """Delete the tenant's quota set."""
+ return self.delete('os-quota-sets/%s' % tenant_id)
diff --git a/tempest/services/volume/json/admin/volume_services_client.py b/tempest/services/volume/json/admin/volume_services_client.py
new file mode 100644
index 0000000..d43c04a
--- /dev/null
+++ b/tempest/services/volume/json/admin/volume_services_client.py
@@ -0,0 +1,38 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import urllib
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(VolumesServicesClientJSON, self).__init__(auth_provider)
+ self.service = CONF.volume.catalog_type
+
+ def list_services(self, params=None):
+ url = 'os-services'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ body = json.loads(body)
+ return resp, body['services']
diff --git a/tempest/services/volume/json/admin/volume_types_client.py b/tempest/services/volume/json/admin/volume_types_client.py
index c9c0582..65ecc67 100644
--- a/tempest/services/volume/json/admin/volume_types_client.py
+++ b/tempest/services/volume/json/admin/volume_types_client.py
@@ -18,6 +18,7 @@
from tempest.common import rest_client
from tempest import config
+from tempest import exceptions
CONF = config.CONF
@@ -34,6 +35,26 @@
self.build_interval = CONF.volume.build_interval
self.build_timeout = CONF.volume.build_timeout
+ def is_resource_deleted(self, resource):
+ # The 'resource' argument must be a dictionary holding the id and the
+ # type of the resource being checked, for example:
+ # resource = {"id": resource_id,
+ #             "type": resource_type}
+ try:
+ if resource['type'] == "volume-type":
+ self.get_volume_type(resource['id'])
+ elif resource['type'] == "encryption-type":
+ resp, body = self.get_encryption_type(resource['id'])
+ assert 200 == resp.status
+ if not body:
+ return True
+ else:
+ msg = (" resource value is either not defined or incorrect.")
+ raise exceptions.UnprocessableEntity(msg)
+ except exceptions.NotFound:
+ return True
+ return False
+
def list_volume_types(self, params=None):
"""List all the volume_types created."""
url = 'types'
@@ -150,3 +171,7 @@
resp, body = self.post(url, post_body)
body = json.loads(body)
return resp, body['encryption']
+
+ def delete_encryption_type(self, vol_type_id):
+ """Delete the encryption type for the specified volume-type."""
+ return self.delete("/types/%s/encryption/provider" % str(vol_type_id))
diff --git a/tempest/services/volume/json/availability_zone_client.py b/tempest/services/volume/json/availability_zone_client.py
new file mode 100644
index 0000000..6839d3a
--- /dev/null
+++ b/tempest/services/volume/json/availability_zone_client.py
@@ -0,0 +1,34 @@
+# Copyright 2014 NEC Corporation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from tempest.common import rest_client
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumeAvailabilityZoneClientJSON(rest_client.RestClient):
+
+ def __init__(self, auth_provider):
+ super(VolumeAvailabilityZoneClientJSON, self).__init__(
+ auth_provider)
+ self.service = CONF.volume.catalog_type
+
+ def get_availability_zone_list(self):
+ resp, body = self.get('os-availability-zone')
+ body = json.loads(body)
+ return resp, body['availabilityZoneInfo']
diff --git a/tempest/services/volume/xml/admin/volume_quotas_client.py b/tempest/services/volume/xml/admin/volume_quotas_client.py
index 710fb3a..a38410b 100644
--- a/tempest/services/volume/xml/admin/volume_quotas_client.py
+++ b/tempest/services/volume/xml/admin/volume_quotas_client.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from ast import literal_eval
+import ast
from lxml import etree
from tempest.common import xml_utils as xml
@@ -35,7 +35,7 @@
quota = {}
for k, v in q.items():
try:
- v = literal_eval(v)
+ v = ast.literal_eval(v)
except (ValueError, SyntaxError):
pass
@@ -68,3 +68,7 @@
str(xml.Document(element)))
body = xml.xml_to_json(etree.fromstring(body))
return resp, self._format_quota(body)
+
+ def delete_quota_set(self, tenant_id):
+ """Delete the tenant's quota set."""
+ return self.delete('os-quota-sets/%s' % tenant_id)
diff --git a/tempest/services/volume/xml/admin/volume_services_client.py b/tempest/services/volume/xml/admin/volume_services_client.py
new file mode 100644
index 0000000..7bad16d
--- /dev/null
+++ b/tempest/services/volume/xml/admin/volume_services_client.py
@@ -0,0 +1,42 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from lxml import etree
+
+from tempest.common import rest_client
+from tempest.common import xml_utils
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumesServicesClientXML(rest_client.RestClient):
+ TYPE = "xml"
+
+ def __init__(self, auth_provider):
+ super(VolumesServicesClientXML, self).__init__(auth_provider)
+ self.service = CONF.volume.catalog_type
+
+ def list_services(self, params=None):
+ url = 'os-services'
+ if params:
+ url += '?%s' % urllib.urlencode(params)
+
+ resp, body = self.get(url)
+ node = etree.fromstring(body)
+ body = [xml_utils.xml_to_json(x) for x in node.getchildren()]
+ return resp, body
diff --git a/tempest/services/volume/xml/availability_zone_client.py b/tempest/services/volume/xml/availability_zone_client.py
new file mode 100644
index 0000000..e4a004a
--- /dev/null
+++ b/tempest/services/volume/xml/availability_zone_client.py
@@ -0,0 +1,39 @@
+# Copyright 2014 NEC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from tempest.common import rest_client
+from tempest.common import xml_utils
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumeAvailabilityZoneClientXML(rest_client.RestClient):
+ TYPE = "xml"
+
+ def __init__(self, auth_provider):
+ super(VolumeAvailabilityZoneClientXML, self).__init__(
+ auth_provider)
+ self.service = CONF.volume.catalog_type
+
+ def _parse_array(self, node):
+ return [xml_utils.xml_to_json(x) for x in node]
+
+ def get_availability_zone_list(self):
+ resp, body = self.get('os-availability-zone')
+ availability_zone = self._parse_array(etree.fromstring(body))
+ return resp, availability_zone
diff --git a/tempest/services/volume/xml/volumes_client.py b/tempest/services/volume/xml/volumes_client.py
index 65bc321..9799e55 100644
--- a/tempest/services/volume/xml/volumes_client.py
+++ b/tempest/services/volume/xml/volumes_client.py
@@ -26,6 +26,11 @@
CONF = config.CONF
+VOLUME_NS_BASE = 'http://docs.openstack.org/volume/ext/'
+VOLUME_HOST_NS = VOLUME_NS_BASE + 'volume_host_attribute/api/v1'
+VOLUME_MIG_STATUS_NS = VOLUME_NS_BASE + 'volume_mig_status_attribute/api/v1'
+VOLUMES_TENANT_NS = VOLUME_NS_BASE + 'volume_tenant_attribute/api/v1'
+
class VolumesClientXML(rest_client.RestClient):
"""
@@ -39,6 +44,23 @@
self.build_interval = CONF.compute.build_interval
self.build_timeout = CONF.compute.build_timeout
+ def _translate_attributes_to_json(self, volume):
+ volume_host_attr = '{' + VOLUME_HOST_NS + '}host'
+ volume_mig_stat_attr = '{' + VOLUME_MIG_STATUS_NS + '}migstat'
+ volume_mig_name_attr = '{' + VOLUME_MIG_STATUS_NS + '}name_id'
+ volume_tenant_id_attr = '{' + VOLUMES_TENANT_NS + '}tenant_id'
+ if volume_host_attr in volume:
+ volume['os-vol-host-attr:host'] = volume.pop(volume_host_attr)
+ if volume_mig_stat_attr in volume:
+ volume['os-vol-mig-status-attr:migstat'] = volume.pop(
+ volume_mig_stat_attr)
+ if volume_mig_name_attr in volume:
+ volume['os-vol-mig-status-attr:name_id'] = volume.pop(
+ volume_mig_name_attr)
+ if volume_tenant_id_attr in volume:
+ volume['os-vol-tenant-attr:tenant_id'] = volume.pop(
+ volume_tenant_id_attr)
+
def _parse_volume(self, body):
vol = dict((attr, body.get(attr)) for attr in body.keys())
@@ -52,6 +74,8 @@
child.getchildren())
else:
vol[tag] = common.xml_to_json(child)
+ self._translate_attributes_to_json(vol)
+ self._check_if_bootable(vol)
return vol
def get_attachment_from_volume(self, volume):
@@ -90,8 +114,6 @@
volumes = []
if body is not None:
volumes += [self._parse_volume(vol) for vol in list(body)]
- for v in volumes:
- v = self._check_if_bootable(v)
return resp, volumes
def list_volumes_with_detail(self, params=None):
@@ -106,8 +128,6 @@
volumes = []
if body is not None:
volumes += [self._parse_volume(vol) for vol in list(body)]
- for v in volumes:
- v = self._check_if_bootable(v)
return resp, volumes
def get_volume(self, volume_id):
@@ -115,7 +135,6 @@
url = "volumes/%s" % str(volume_id)
resp, body = self.get(url)
body = self._parse_volume(etree.fromstring(body))
- body = self._check_if_bootable(body)
return resp, body
def create_volume(self, size=None, **kwargs):
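A minimal standalone sketch of the Clark-notation key translation that the new _translate_attributes_to_json helper performs; the sample volume dict and host value below are invented for illustration:

    VOLUME_NS_BASE = 'http://docs.openstack.org/volume/ext/'
    VOLUME_HOST_NS = VOLUME_NS_BASE + 'volume_host_attribute/api/v1'

    # An XML-parsed volume carries the extension attribute under its
    # namespaced (Clark notation) key ...
    host_key = '{' + VOLUME_HOST_NS + '}host'
    volume = {host_key: 'fake-host'}
    # ... which the helper renames to the JSON-style alias.
    if host_key in volume:
        volume['os-vol-host-attr:host'] = volume.pop(host_key)
    assert volume == {'os-vol-host-attr:host': 'fake-host'}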
diff --git a/tempest/stress/README.rst b/tempest/stress/README.rst
index b56f96b..0a63679 100644
--- a/tempest/stress/README.rst
+++ b/tempest/stress/README.rst
@@ -34,14 +34,14 @@
In order to use this discovery you have to be in the tempest root directory
and execute the following:
- tempest/stress/run_stress.py -a -d 30
+ run-tempest-stress -a -d 30
Running the sample test
-----------------------
-To test installation, do the following (from the tempest/stress directory):
+To test installation, do the following:
- ./run_stress.py -t etc/server-create-destroy-test.json -d 30
+ run-tempest-stress -t tempest/stress/etc/server-create-destroy-test.json -d 30
This sample test tries to create a few VMs and kill a few VMs.
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index c330165..478cd07 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -32,8 +32,6 @@
stderr=subprocess.PIPE)
proc.wait()
success = proc.returncode == 0
- self.logger.info("%s(%s): %s", self.server_id, self.floating['ip'],
- "pong!" if success else "no pong :(")
return success
def tcp_connect_scan(self, addr, port):
@@ -58,11 +56,17 @@
raise RuntimeError("Cannot connect to the ssh port.")
def check_icmp_echo(self):
+ self.logger.info("%s(%s): Pinging..",
+ self.server_id, self.floating['ip'])
+
def func():
return self.ping_ip_address(self.floating['ip'])
if not tempest.test.call_until_true(func, self.check_timeout,
self.check_interval):
- raise RuntimeError("Cannot ping the machine.")
+ raise RuntimeError("%s(%s): Cannot ping the machine." %
+ (self.server_id, self.floating['ip']))
+ self.logger.info("%s(%s): pong :)",
+ self.server_id, self.floating['ip'])
def _create_vm(self):
self.name = name = data_utils.rand_name("instance")
@@ -170,6 +174,8 @@
self._create_vm()
if self.reboot:
self.manager.servers_client.reboot(self.server_id, 'HARD')
+ self.manager.servers_client.wait_for_server_status(self.server_id,
+ 'ACTIVE')
self.run_core()
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
new file mode 100644
index 0000000..1bc3b06
--- /dev/null
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -0,0 +1,232 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.common.utils import data_utils
+from tempest.common.utils.linux import remote_client
+from tempest import config
+import tempest.stress.stressaction as stressaction
+import tempest.test
+
+import re
+CONF = config.CONF
+
+
+class VolumeVerifyStress(stressaction.StressAction):
+
+ def _create_keypair(self):
+ keyname = data_utils.rand_name("key")
+ resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
+ assert(resp.status == 200)
+
+ def _delete_keypair(self):
+ resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
+ assert(resp.status == 202)
+
+ def _create_vm(self):
+ self.name = name = data_utils.rand_name("instance")
+ servers_client = self.manager.servers_client
+ self.logger.info("creating %s" % name)
+ vm_args = self.vm_extra_args.copy()
+ vm_args['security_groups'] = [self.sec_grp]
+ vm_args['key_name'] = self.key['name']
+ resp, server = servers_client.create_server(name, self.image,
+ self.flavor,
+ **vm_args)
+ self.server_id = server['id']
+ assert(resp.status == 202)
+ self.manager.servers_client.wait_for_server_status(self.server_id,
+ 'ACTIVE')
+
+ def _destroy_vm(self):
+ self.logger.info("deleting server: %s" % self.server_id)
+ resp, _ = self.manager.servers_client.delete_server(self.server_id)
+ assert(resp.status == 204) # It cannot be 204 if I had to wait..
+ self.manager.servers_client.wait_for_server_termination(self.server_id)
+ self.logger.info("deleted server: %s" % self.server_id)
+
+ def _create_sec_group(self):
+ sec_grp_cli = self.manager.security_groups_client
+ s_name = data_utils.rand_name('sec_grp-')
+ s_description = data_utils.rand_name('desc-')
+ _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
+ s_description)
+ create_rule = sec_grp_cli.create_security_group_rule
+ create_rule(self.sec_grp['id'], 'tcp', 22, 22)
+ create_rule(self.sec_grp['id'], 'icmp', -1, -1)
+
+ def _destroy_sec_grp(self):
+ sec_grp_cli = self.manager.security_groups_client
+ sec_grp_cli.delete_security_group(self.sec_grp['id'])
+
+ def _create_floating_ip(self):
+ floating_cli = self.manager.floating_ips_client
+ _, self.floating = floating_cli.create_floating_ip(self.floating_pool)
+
+ def _destroy_floating_ip(self):
+ cli = self.manager.floating_ips_client
+ cli.delete_floating_ip(self.floating['id'])
+ cli.wait_for_resource_deletion(self.floating['id'])
+ self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
+
+ def _create_volume(self):
+ name = data_utils.rand_name("volume")
+ self.logger.info("creating volume: %s" % name)
+ volumes_client = self.manager.volumes_client
+ resp, self.volume = volumes_client.create_volume(size=1,
+ display_name=
+ name)
+ assert(resp.status == 200)
+ volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
+ self.logger.info("created volume: %s" % self.volume['id'])
+
+ def _delete_volume(self):
+ self.logger.info("deleting volume: %s" % self.volume['id'])
+ volumes_client = self.manager.volumes_client
+ resp, _ = volumes_client.delete_volume(self.volume['id'])
+ assert(resp.status == 202)
+ volumes_client.wait_for_resource_deletion(self.volume['id'])
+ self.logger.info("deleted volume: %s" % self.volume['id'])
+
+ def _wait_disassociate(self):
+ cli = self.manager.floating_ips_client
+
+ def func():
+ _, floating = cli.get_floating_ip_details(self.floating['id'])
+ return floating['instance_id'] is None
+
+ if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ raise RuntimeError("IP disassociate timeout!")
+
+ def new_server_ops(self):
+ self._create_vm()
+ cli = self.manager.floating_ips_client
+ cli.associate_floating_ip_to_server(self.floating['ip'],
+ self.server_id)
+ if self.ssh_test_before_attach and self.enable_ssh_verify:
+ self.logger.info("Scanning for block devices via ssh on %s"
+ % self.server_id)
+ self.part_wait(self.detach_match_count)
+
+ def setUp(self, **kwargs):
+ """Note able configuration combinations:
+ Closest options to the test_stamp_pattern:
+ new_server = True
+ new_volume = True
+ enable_ssh_verify = True
+ ssh_test_before_attach = False
+ Just attaching:
+ new_server = False
+ new_volume = False
+ enable_ssh_verify = True
+ ssh_test_before_attach = True
+ Mostly API load by repeated attachment:
+ new_server = False
+ new_volume = False
+ enable_ssh_verify = False
+ ssh_test_before_attach = False
+ Minimal Nova load, but cinder load not decreased:
+ new_server = False
+ new_volume = True
+ enable_ssh_verify = True
+ ssh_test_before_attach = True
+ """
+ self.image = CONF.compute.image_ref
+ self.flavor = CONF.compute.flavor_ref
+ self.vm_extra_args = kwargs.get('vm_extra_args', {})
+ self.floating_pool = kwargs.get('floating_pool', None)
+ self.new_volume = kwargs.get('new_volume', True)
+ self.new_server = kwargs.get('new_server', False)
+ self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
+ self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
+ False)
+ self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
+ self.detach_match_count = kwargs.get('detach_match_count', 1)
+ self.attach_match_count = kwargs.get('attach_match_count', 2)
+ self.part_name = kwargs.get('part_name', '/dev/vdc')
+
+ self._create_floating_ip()
+ self._create_sec_group()
+ self._create_keypair()
+ private_key = self.key['private_key']
+ username = CONF.compute.image_ssh_user
+ self.remote_client = remote_client.RemoteClient(self.floating['ip'],
+ username,
+ pkey=private_key)
+ if not self.new_volume:
+ self._create_volume()
+ if not self.new_server:
+ self.new_server_ops()
+
+ # now we just test whether the number of partitions increased or decreased
+ def part_wait(self, num_match):
+ def _part_state():
+ self.partitions = self.remote_client.get_partitions().split('\n')
+ matching = 0
+ for part_line in self.partitions[1:]:
+ if self.part_line_re.match(part_line):
+ matching += 1
+ return matching == num_match
+ if tempest.test.call_until_true(_part_state,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ return
+ else:
+ raise RuntimeError("Unexpected partitions: %s",
+ str(self.partitions))
+
+ def run(self):
+ if self.new_server:
+ self.new_server_ops()
+ if self.new_volume:
+ self._create_volume()
+ servers_client = self.manager.servers_client
+ self.logger.info("attach volume (%s) to vm %s" %
+ (self.volume['id'], self.server_id))
+ resp, body = servers_client.attach_volume(self.server_id,
+ self.volume['id'],
+ self.part_name)
+ assert(resp.status == 200)
+ self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'in-use')
+ if self.enable_ssh_verify:
+ self.logger.info("Scanning for new block device on %s"
+ % self.server_id)
+ self.part_wait(self.attach_match_count)
+
+ resp, body = servers_client.detach_volume(self.server_id,
+ self.volume['id'])
+ assert(resp.status == 202)
+ self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
+ 'available')
+ if self.enable_ssh_verify:
+ self.logger.info("Scanning for block device disapperance on %s"
+ % self.server_id)
+ self.part_wait(self.detach_match_count)
+ if self.new_volume:
+ self._delete_volume()
+ if self.new_server:
+ self._destroy_vm()
+
+ def tearDown(self):
+ cli = self.manager.floating_ips_client
+ cli.disassociate_floating_ip_from_server(self.floating['ip'],
+ self.server_id)
+ self._wait_disassociate()
+ if not self.new_server:
+ self._destroy_vm()
+ self._delete_keypair()
+ self._destroy_floating_ip()
+ self._destroy_sec_grp()
+ if not self.new_volume:
+ self._delete_volume()
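A small self-contained illustration of the regex-count check that part_wait() runs against remote_client.get_partitions() output; the partition listing below is invented:

    import re

    part_line_re = re.compile('.*vd.*')  # the default part_line_re kwarg
    partitions = ['major minor  #blocks  name',
                  ' 253        0   20971520 vda',
                  ' 253       32    1048576 vdc']
    # Count data lines matching the pattern, as _part_state() does.
    matching = sum(1 for line in partitions[1:] if part_line_re.match(line))
    print(matching)  # 2, i.e. the default attach_match_count is reached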
diff --git a/tempest/stress/driver.py b/tempest/stress/driver.py
index 9660081..642108a 100644
--- a/tempest/stress/driver.py
+++ b/tempest/stress/driver.py
@@ -19,6 +19,7 @@
from six import moves
+from tempest import auth
from tempest import clients
from tempest.common import ssh
from tempest.common.utils import data_utils
@@ -80,17 +81,23 @@
return ret
-def sigchld_handler(signal, frame):
+def sigchld_handler(signalnum, frame):
"""
Signal handler (only active if stop_on_error is True).
"""
- terminate_all_processes()
+ for process in processes:
+ if (not process['process'].is_alive() and
+ process['process'].exitcode != 0):
+ signal.signal(signalnum, signal.SIG_DFL)
+ terminate_all_processes()
+ break
def terminate_all_processes(check_interval=20):
"""
Goes through the process list and terminates all child processes.
"""
+ LOG.info("Stopping all processes.")
for process in processes:
if process['process'].is_alive():
try:
@@ -141,9 +148,10 @@
password,
tenant['id'],
"email")
- manager = clients.Manager(username=username,
- password="pass",
- tenant_name=tenant_name)
+ creds = auth.get_credentials(username=username,
+ password=password,
+ tenant_name=tenant_name)
+ manager = clients.Manager(credentials=creds)
test_obj = importutils.import_class(test['action'])
test_run = test_obj(manager, max_runs, stop_on_error)
@@ -174,34 +182,39 @@
signal.signal(signal.SIGCHLD, sigchld_handler)
end_time = time.time() + duration
had_errors = False
- while True:
- if max_runs is None:
- remaining = end_time - time.time()
- if remaining <= 0:
- break
- else:
- remaining = log_check_interval
- all_proc_term = True
- for process in processes:
- if process['process'].is_alive():
- all_proc_term = False
+ try:
+ while True:
+ if max_runs is None:
+ remaining = end_time - time.time()
+ if remaining <= 0:
break
- if all_proc_term:
- break
-
- time.sleep(min(remaining, log_check_interval))
- if stop_on_error:
- for process in processes:
- if process['statistic']['fails'] > 0:
+ else:
+ remaining = log_check_interval
+ all_proc_term = True
+ for process in processes:
+ if process['process'].is_alive():
+ all_proc_term = False
+ break
+ if all_proc_term:
break
- if not logfiles:
- continue
- if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
- stop_on_error):
- had_errors = True
- break
+ time.sleep(min(remaining, log_check_interval))
+ if stop_on_error:
+ if any(proc['statistic']['fails'] > 0
+ for proc in processes):
+ break
+ if not logfiles:
+ continue
+ if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
+ stop_on_error):
+ had_errors = True
+ break
+ except KeyboardInterrupt:
+ LOG.warning("Interrupted, going to print statistics and exit ...")
+
+ if stop_on_error:
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
terminate_all_processes()
sum_fails = 0
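For clarity, a self-contained sketch of the stop_on_error failure check the driver applies to its per-process statistics; the process list is made up:

    processes = [{'statistic': {'fails': 0}},
                 {'statistic': {'fails': 2}}]
    # Stop the run as soon as any worker has recorded a failure.
    if any(proc['statistic']['fails'] > 0 for proc in processes):
        print('a worker reported failures; stopping the stress run')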
diff --git a/tempest/stress/etc/volume-attach-verify.json b/tempest/stress/etc/volume-attach-verify.json
new file mode 100644
index 0000000..731f5ed
--- /dev/null
+++ b/tempest/stress/etc/volume-attach-verify.json
@@ -0,0 +1,11 @@
+[{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
+ "threads": 1,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {"vm_extra_args": {},
+ "new_volume": true,
+ "new_server": false,
+ "ssh_test_before_attach": false,
+ "enable_ssh_verify": true}
+}
+]
diff --git a/tempest/test.py b/tempest/test.py
index 254fffa..748a98c 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -307,26 +307,18 @@
cls.__name__, network_resources=cls.network_resources)
force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
- if (CONF.compute.allow_tenant_isolation or
- force_tenant_isolation):
+ if CONF.compute.allow_tenant_isolation or force_tenant_isolation:
creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
if getattr(cls, '_interface', None):
- os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
+ os = clients.Manager(credentials=creds,
interface=cls._interface,
service=cls._service)
elif interface:
- os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
+ os = clients.Manager(credentials=creds,
interface=interface,
service=cls._service)
else:
- os = clients.Manager(username=username,
- password=password,
- tenant_name=tenant_name,
+ os = clients.Manager(credentials=creds,
service=cls._service)
else:
if getattr(cls, '_interface', None):
diff --git a/tempest/tests/base.py b/tempest/tests/base.py
index 15e4311..f4df3b9 100644
--- a/tempest/tests/base.py
+++ b/tempest/tests/base.py
@@ -12,28 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
-
-import fixtures
import mock
-import testtools
-from tempest.openstack.common.fixture import moxstubout
+from oslotest import base
+from oslotest import moxstubout
-class TestCase(testtools.TestCase):
+class TestCase(base.BaseTestCase):
def setUp(self):
super(TestCase, self).setUp()
- if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
- os.environ.get('OS_STDOUT_CAPTURE') == '1'):
- stdout = self.useFixture(fixtures.StringStream('stdout')).stream
- self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
- if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
- os.environ.get('OS_STDERR_CAPTURE') == '1'):
- stderr = self.useFixture(fixtures.StringStream('stderr')).stream
- self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
-
mox_fixture = self.useFixture(moxstubout.MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
diff --git a/tempest/api/compute/v3/certificates/__init__.py b/tempest/tests/cmd/__init__.py
similarity index 100%
copy from tempest/api/compute/v3/certificates/__init__.py
copy to tempest/tests/cmd/__init__.py
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
new file mode 100644
index 0000000..40caf30
--- /dev/null
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -0,0 +1,397 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import mock
+
+from tempest.cmd import verify_tempest_config
+from tempest import config
+from tempest.openstack.common.fixture import mockpatch
+from tempest.tests import base
+from tempest.tests import fake_config
+
+
+class TestGetAPIVersions(base.TestCase):
+
+ def test_url_grab_versioned_nova_nossl(self):
+ base_url = 'http://127.0.0.1:8774/v2/'
+ endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+ self.assertEqual('http://127.0.0.1:8774', endpoint)
+
+ def test_url_grab_versioned_nova_ssl(self):
+ base_url = 'https://127.0.0.1:8774/v3/'
+ endpoint = verify_tempest_config._get_unversioned_endpoint(base_url)
+ self.assertEqual('https://127.0.0.1:8774', endpoint)
+
+
+class TestDiscovery(base.TestCase):
+
+ def setUp(self):
+ super(TestDiscovery, self).setUp()
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def test_get_keystone_api_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
+ self.assertIn('v2.0', versions)
+ self.assertIn('v3.0', versions)
+
+ def test_get_cinder_api_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
+ self.assertIn('v1.0', versions)
+ self.assertIn('v2.0', versions)
+
+ def test_get_nova_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
+ self.assertIn('v2.0', versions)
+ self.assertIn('v3.0', versions)
+
+ def test_verify_keystone_api_versions_no_v3(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v3',
+ 'identity_feature_enabled',
+ False, True)
+
+ def test_verify_keystone_api_versions_no_v2(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_keystone_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2',
+ 'identity_feature_enabled',
+ False, True)
+
+ def test_verify_cinder_api_versions_no_v2(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v1.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2', 'volume_feature_enabled',
+ False, True)
+
+ def test_verify_cinder_api_versions_no_v1(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v2.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_cinder_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v1', 'volume_feature_enabled',
+ False, True)
+
+ def test_verify_nova_versions(self):
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint',
+ return_value='http://fake_endpoint:5000'))
+ fake_resp = {'versions': [{'id': 'v2.0'}]}
+ fake_resp = json.dumps(fake_resp)
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.RAW_HTTP, 'request',
+ return_value=(None, fake_resp)))
+ fake_os = mock.MagicMock()
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_nova_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v3', 'compute_feature_enabled',
+ False, True)
+
+ def test_verify_glance_version_no_v2_with_v1_1(self):
+ def fake_get_versions():
+ return (None, ['v1.1'])
+ fake_os = mock.MagicMock()
+ fake_os.image_client.get_versions = fake_get_versions
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+ False, True)
+
+ def test_verify_glance_version_no_v2_with_v1_0(self):
+ def fake_get_versions():
+ return (None, ['v1.0'])
+ fake_os = mock.MagicMock()
+ fake_os.image_client.get_versions = fake_get_versions
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v2', 'image_feature_enabled',
+ False, True)
+
+ def test_verify_glance_version_no_v1(self):
+ def fake_get_versions():
+ return (None, ['v2.0'])
+ fake_os = mock.MagicMock()
+ fake_os.image_client.get_versions = fake_get_versions
+ with mock.patch.object(verify_tempest_config,
+ 'print_and_or_update') as print_mock:
+ verify_tempest_config.verify_glance_api_versions(fake_os, True)
+ print_mock.assert_called_once_with('api_v1', 'image_feature_enabled',
+ False, True)
+
+ def test_verify_extensions_neutron(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.network_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'neutron', {})
+ self.assertIn('neutron', results)
+ self.assertIn('fake1', results['neutron'])
+ self.assertTrue(results['neutron']['fake1'])
+ self.assertIn('fake2', results['neutron'])
+ self.assertTrue(results['neutron']['fake2'])
+ self.assertIn('fake3', results['neutron'])
+ self.assertFalse(results['neutron']['fake3'])
+ self.assertIn('not_fake', results['neutron'])
+ self.assertFalse(results['neutron']['not_fake'])
+
+ def test_verify_extensions_neutron_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'alias': 'fake1'},
+ {'alias': 'fake2'},
+ {'alias': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.network_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'neutron', {})
+ self.assertIn('neutron', results)
+ self.assertIn('extensions', results['neutron'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['neutron']['extensions'])
+
+ def test_verify_extensions_cinder(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'cinder', {})
+ self.assertIn('cinder', results)
+ self.assertIn('fake1', results['cinder'])
+ self.assertTrue(results['cinder']['fake1'])
+ self.assertIn('fake2', results['cinder'])
+ self.assertTrue(results['cinder']['fake2'])
+ self.assertIn('fake3', results['cinder'])
+ self.assertFalse(results['cinder']['fake3'])
+ self.assertIn('not_fake', results['cinder'])
+ self.assertFalse(results['cinder']['not_fake'])
+
+ def test_verify_extensions_cinder_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.volumes_extension_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'cinder', {})
+ self.assertIn('cinder', results)
+ self.assertIn('extensions', results['cinder'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['cinder']['extensions'])
+
+ def test_verify_extensions_nova(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova', {})
+ self.assertIn('nova', results)
+ self.assertIn('fake1', results['nova'])
+ self.assertTrue(results['nova']['fake1'])
+ self.assertIn('fake2', results['nova'])
+ self.assertTrue(results['nova']['fake2'])
+ self.assertIn('fake3', results['nova'])
+ self.assertFalse(results['nova']['fake3'])
+ self.assertIn('not_fake', results['nova'])
+ self.assertFalse(results['nova']['not_fake'])
+
+ def test_verify_extensions_nova_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova', {})
+ self.assertIn('nova', results)
+ self.assertIn('extensions', results['nova'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['nova']['extensions'])
+
+ def test_verify_extensions_nova_v3(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova_v3', {})
+ self.assertIn('nova_v3', results)
+ self.assertIn('fake1', results['nova_v3'])
+ self.assertTrue(results['nova_v3']['fake1'])
+ self.assertIn('fake2', results['nova_v3'])
+ self.assertTrue(results['nova_v3']['fake2'])
+ self.assertIn('fake3', results['nova_v3'])
+ self.assertFalse(results['nova_v3']['fake3'])
+ self.assertIn('not_fake', results['nova_v3'])
+ self.assertFalse(results['nova_v3']['not_fake'])
+
+ def test_verify_extensions_nova_v3_all(self):
+ def fake_list_extensions():
+ return (None, {'extensions': [{'name': 'fake1'},
+ {'name': 'fake2'},
+ {'name': 'not_fake'}]})
+ fake_os = mock.MagicMock()
+ fake_os.extensions_v3_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'nova_v3', {})
+ self.assertIn('nova_v3', results)
+ self.assertIn('extensions', results['nova_v3'])
+ self.assertEqual(['fake1', 'fake2', 'not_fake'],
+ results['nova_v3']['extensions'])
+
+ def test_verify_extensions_swift(self):
+ def fake_list_extensions():
+ return (None, {'fake1': 'metadata',
+ 'fake2': 'metadata',
+ 'not_fake': 'metadata',
+ 'swift': 'metadata'})
+ fake_os = mock.MagicMock()
+ fake_os.account_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['fake1', 'fake2', 'fake3'])))
+ results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
+ self.assertIn('swift', results)
+ self.assertIn('fake1', results['swift'])
+ self.assertTrue(results['swift']['fake1'])
+ self.assertIn('fake2', results['swift'])
+ self.assertTrue(results['swift']['fake2'])
+ self.assertIn('fake3', results['swift'])
+ self.assertFalse(results['swift']['fake3'])
+ self.assertIn('not_fake', results['swift'])
+ self.assertFalse(results['swift']['not_fake'])
+
+ def test_verify_extensions_swift_all(self):
+ def fake_list_extensions():
+ return (None, {'fake1': 'metadata',
+ 'fake2': 'metadata',
+ 'not_fake': 'metadata',
+ 'swift': 'metadata'})
+ fake_os = mock.MagicMock()
+ fake_os.account_client.list_extensions = fake_list_extensions
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, 'get_enabled_extensions',
+ return_value=(['all'])))
+ results = verify_tempest_config.verify_extensions(fake_os,
+ 'swift', {})
+ self.assertIn('swift', results)
+ self.assertIn('extensions', results['swift'])
+ self.assertEqual(['not_fake', 'fake1', 'fake2'],
+ results['swift']['extensions'])
diff --git a/tempest/tests/common/utils/test_file_utils.py b/tempest/tests/common/utils/test_file_utils.py
index 99ae033..605e82a 100644
--- a/tempest/tests/common/utils/test_file_utils.py
+++ b/tempest/tests/common/utils/test_file_utils.py
@@ -14,7 +14,6 @@
# under the License.
import mock
-from mock import patch
from tempest.common.utils import file_utils
from tempest.tests import base
@@ -23,7 +22,7 @@
class TestFileUtils(base.TestCase):
def test_have_effective_read_path(self):
- with patch('__builtin__.open', mock.mock_open(), create=True):
+ with mock.patch('__builtin__.open', mock.mock_open(), create=True):
result = file_utils.have_effective_read_access('fake_path')
self.assertTrue(result)
diff --git a/tempest/tests/common/utils/test_misc.py b/tempest/tests/common/utils/test_misc.py
index b8c6184..aee9805 100644
--- a/tempest/tests/common/utils/test_misc.py
+++ b/tempest/tests/common/utils/test_misc.py
@@ -50,3 +50,39 @@
self.assertEqual(test, test2)
test3 = TestBar()
self.assertNotEqual(test, test3)
+
+ def test_find_test_caller_test_case(self):
+ # Calling it from here should give us the method we're in.
+ self.assertEqual('TestMisc:test_find_test_caller_test_case',
+ misc.find_test_caller())
+
+ def test_find_test_caller_setup_self(self):
+ def setUp(self):
+ return misc.find_test_caller()
+ self.assertEqual('TestMisc:setUp', setUp(self))
+
+ def test_find_test_caller_setup_no_self(self):
+ def setUp():
+ return misc.find_test_caller()
+ self.assertEqual(':setUp', setUp())
+
+ def test_find_test_caller_setupclass_cls(self):
+ def setUpClass(cls): # noqa
+ return misc.find_test_caller()
+ self.assertEqual('TestMisc:setUpClass', setUpClass(self.__class__))
+
+ def test_find_test_caller_teardown_self(self):
+ def tearDown(self):
+ return misc.find_test_caller()
+ self.assertEqual('TestMisc:tearDown', tearDown(self))
+
+ def test_find_test_caller_teardown_no_self(self):
+ def tearDown():
+ return misc.find_test_caller()
+ self.assertEqual(':tearDown', tearDown())
+
+ def test_find_test_caller_teardown_class(self):
+ def tearDownClass(cls):
+ return misc.find_test_caller()
+ self.assertEqual('TestMisc:tearDownClass',
+ tearDownClass(self.__class__))
diff --git a/tempest/tests/fake_auth_provider.py b/tempest/tests/fake_auth_provider.py
index bc68d26..44c331e 100644
--- a/tempest/tests/fake_auth_provider.py
+++ b/tempest/tests/fake_auth_provider.py
@@ -13,6 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest.tests import fake_credentials
+
+
+def get_default_credentials(credential_type, fill_in=True):
+ return fake_credentials.FakeCredentials()
+
+
+def get_credentials(credential_type=None, fill_in=True, **kwargs):
+ return fake_credentials.FakeCredentials()
+
class FakeAuthProvider(object):
diff --git a/tempest/tests/fake_config.py b/tempest/tests/fake_config.py
index 4676cbd..4bed0c2 100644
--- a/tempest/tests/fake_config.py
+++ b/tempest/tests/fake_config.py
@@ -45,6 +45,16 @@
os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
self.conf.set_default('lock_path',
str(os.environ.get('OS_TEST_LOCK_PATH')))
+ self.conf.set_default('auth_version', 'v2', group='identity')
+ for config_option in ['username', 'password', 'tenant_name']:
+ # Identity group items
+ for prefix in ['', 'alt_', 'admin_']:
+ self.conf.set_default(prefix + config_option,
+ 'fake_' + config_option,
+ group='identity')
+ # Compute Admin group items
+ self.conf.set_default(config_option, 'fake_' + config_option,
+ group='compute-admin')
class FakePrivate(config.TempestConfigPrivate):
diff --git a/tempest/tests/fake_credentials.py b/tempest/tests/fake_credentials.py
new file mode 100644
index 0000000..48f67d2
--- /dev/null
+++ b/tempest/tests/fake_credentials.py
@@ -0,0 +1,62 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import auth
+
+
+class FakeCredentials(auth.Credentials):
+
+ def is_valid(self):
+ return True
+
+
+class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
+
+ def __init__(self):
+ creds = dict(
+ username='fake_username',
+ password='fake_password',
+ tenant_name='fake_tenant_name'
+ )
+ super(FakeKeystoneV2Credentials, self).__init__(**creds)
+
+
+class FakeKeystoneV3Credentials(auth.KeystoneV3Credentials):
+ """
+ Fake credentials suitable for the Keystone Identity V3 API
+ """
+
+ def __init__(self):
+ creds = dict(
+ username='fake_username',
+ password='fake_password',
+ user_domain_name='fake_domain_name',
+ project_name='fake_tenant_name'
+ )
+ super(FakeKeystoneV3Credentials, self).__init__(**creds)
+
+
+class FakeKeystoneV3DomainCredentials(auth.KeystoneV3Credentials):
+ """
+ Fake credentials suitable for the Keystone Identity V3 API, with no scope
+ """
+
+ def __init__(self):
+ creds = dict(
+ username='fake_username',
+ password='fake_password',
+ user_domain_name='fake_domain_name'
+ )
+ super(FakeKeystoneV3DomainCredentials, self).__init__(**creds)
diff --git a/tempest/tests/fake_http.py b/tempest/tests/fake_http.py
index a09d5ba..7b878af 100644
--- a/tempest/tests/fake_http.py
+++ b/tempest/tests/fake_http.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import httplib2
@@ -44,3 +45,29 @@
else:
msg = "unsupported return type %s" % self.return_type
raise TypeError(msg)
+
+
+class fake_httplib(object):
+ def __init__(self, headers, body=None,
+ version=1.0, status=200, reason="Ok"):
+ """
+ :param headers: dict representing HTTP response headers
+ :param body: file-like object
+ :param version: HTTP Version
+ :param status: Response status code
+ :param reason: Status code related message.
+ """
+ self.body = body
+ self.status = status
+ self.reason = reason
+ self.version = version
+ self.headers = headers
+
+ def getheaders(self):
+ return copy.deepcopy(self.headers).items()
+
+ def getheader(self, key, default):
+ return self.headers.get(key, default)
+
+ def read(self, amt):
+ return self.body.read(amt)
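A hedged usage sketch of the new fake_httplib helper, mirroring how test_glance_http.py (further below) builds response fixtures; the header and body values are invented:

    import six

    from tempest.tests import fake_http

    resp = fake_http.fake_httplib({'content-type': 'application/json'},
                                  status=200,
                                  body=six.StringIO('fake_response_body'))
    print(resp.getheader('content-type', None))  # application/json
    print(resp.read(4))                          # fake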
diff --git a/tempest/tests/fake_identity.py b/tempest/tests/fake_identity.py
index 058c9c2..1900fc9 100644
--- a/tempest/tests/fake_identity.py
+++ b/tempest/tests/fake_identity.py
@@ -113,7 +113,7 @@
"expires_at": "2020-01-01T00:00:10.000123Z",
"project": {
"domain": {
- "id": "fake_id",
+ "id": "fake_domain_id",
"name": "fake"
},
"id": "project_id",
@@ -121,7 +121,7 @@
},
"user": {
"domain": {
- "id": "domain_id",
+ "id": "fake_domain_id",
"name": "domain_name"
},
"id": "fake_user_id",
diff --git a/tempest/tests/stress/test_stress.py b/tempest/tests/stress/test_stress.py
index c76abde..5a334c5 100644
--- a/tempest/tests/stress/test_stress.py
+++ b/tempest/tests/stress/test_stress.py
@@ -18,12 +18,12 @@
import tempest.cli as cli
from tempest.openstack.common import log as logging
-import tempest.test
+from tempest.tests import base
LOG = logging.getLogger(__name__)
-class StressFrameworkTest(tempest.test.BaseTestCase):
+class StressFrameworkTest(base.TestCase):
"""Basic test for the stress test framework.
"""
@@ -51,5 +51,5 @@
return proc.returncode
def test_help_function(self):
- result = self._cmd("python", "-m tempest.stress.run_stress -h")
+ result = self._cmd("python", "-m tempest.cmd.run_stress -h")
self.assertEqual(0, result)
diff --git a/tempest/tests/test_auth.py b/tempest/tests/test_auth.py
index 62c20e3..1dcddad 100644
--- a/tempest/tests/test_auth.py
+++ b/tempest/tests/test_auth.py
@@ -16,24 +16,23 @@
import copy
import datetime
+from oslotest import mockpatch
+
from tempest import auth
from tempest.common import http
from tempest import config
from tempest import exceptions
-from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
+from tempest.tests import fake_auth_provider
from tempest.tests import fake_config
+from tempest.tests import fake_credentials
from tempest.tests import fake_http
from tempest.tests import fake_identity
class BaseAuthTestsSetUp(base.TestCase):
_auth_provider_class = None
- credentials = {
- 'username': 'fake_user',
- 'password': 'fake_pwd',
- 'tenant_name': 'fake_tenant'
- }
+ credentials = fake_credentials.FakeCredentials()
def _auth(self, credentials, **params):
"""
@@ -47,6 +46,10 @@
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
self.fake_http = fake_http.fake_httplib2(return_type=200)
self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+ self.stubs.Set(auth, 'get_credentials',
+ fake_auth_provider.get_credentials)
+ self.stubs.Set(auth, 'get_default_credentials',
+ fake_auth_provider.get_default_credentials)
self.auth_provider = self._auth(self.credentials)
@@ -58,12 +61,19 @@
"""
_auth_provider_class = auth.AuthProvider
- def test_check_credentials_is_dict(self):
- self.assertTrue(self.auth_provider.check_credentials({}))
+ def test_check_credentials_class(self):
+ self.assertRaises(NotImplementedError,
+ self.auth_provider.check_credentials,
+ auth.Credentials())
def test_check_credentials_bad_type(self):
self.assertFalse(self.auth_provider.check_credentials([]))
+ def test_instantiate_with_dict(self):
+ # Dict credentials are only supported for backward compatibility
+ auth_provider = self._auth(credentials={})
+ self.assertIsInstance(auth_provider.credentials, auth.Credentials)
+
def test_instantiate_with_bad_credentials_type(self):
"""
Assure that credentials with bad type fail with TypeError
@@ -100,10 +110,15 @@
self.assertIsNone(self.auth_provider.alt_part)
self.assertIsNone(self.auth_provider.alt_auth_data)
+ def test_fill_credentials(self):
+ self.assertRaises(NotImplementedError,
+ self.auth_provider.fill_credentials)
+
class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
_endpoints = fake_identity.IDENTITY_V2_RESPONSE['access']['serviceCatalog']
_auth_provider_class = auth.KeystoneV2AuthProvider
+ credentials = fake_credentials.FakeKeystoneV2Credentials()
def setUp(self):
super(TestKeystoneV2AuthProvider, self).setUp()
@@ -123,6 +138,13 @@
def _get_token_from_fake_identity(self):
return fake_identity.TOKEN
+ def _get_from_fake_identity(self, attr):
+ access = fake_identity.IDENTITY_V2_RESPONSE['access']
+ if attr == 'user_id':
+ return access['user']['id']
+ elif attr == 'tenant_id':
+ return access['token']['tenant']['id']
+
def _test_request_helper(self, filters, expected):
url, headers, body = self.auth_provider.auth_request('GET',
self.target_url,
@@ -210,16 +232,12 @@
del cred[attr]
self.assertFalse(self.auth_provider.check_credentials(cred))
- def test_check_credentials_not_scoped_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['tenant_name']
- self.assertTrue(self.auth_provider.check_credentials(cred,
- scoped=False))
-
- def test_check_credentials_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['tenant_name']
- self.assertFalse(self.auth_provider.check_credentials(cred))
+ def test_fill_credentials(self):
+ self.auth_provider.fill_credentials()
+ creds = self.auth_provider.credentials
+ for attr in ['user_id', 'tenant_id']:
+ self.assertEqual(self._get_from_fake_identity(attr),
+ getattr(creds, attr))
def _test_base_url_helper(self, expected_url, filters,
auth_data=None):
@@ -321,12 +339,7 @@
class TestKeystoneV3AuthProvider(TestKeystoneV2AuthProvider):
_endpoints = fake_identity.IDENTITY_V3_RESPONSE['token']['catalog']
_auth_provider_class = auth.KeystoneV3AuthProvider
- credentials = {
- 'username': 'fake_user',
- 'password': 'fake_pwd',
- 'tenant_name': 'fake_tenant',
- 'domain_name': 'fake_domain_name',
- }
+ credentials = fake_credentials.FakeKeystoneV3Credentials()
def setUp(self):
super(TestKeystoneV3AuthProvider, self).setUp()
@@ -346,10 +359,44 @@
access['expires_at'] = date_as_string
return token, access
- def test_check_credentials_missing_tenant_name(self):
- cred = copy.copy(self.credentials)
- del cred['domain_name']
- self.assertFalse(self.auth_provider.check_credentials(cred))
+ def _get_from_fake_identity(self, attr):
+ token = fake_identity.IDENTITY_V3_RESPONSE['token']
+ if attr == 'user_id':
+ return token['user']['id']
+ elif attr == 'project_id':
+ return token['project']['id']
+ elif attr == 'user_domain_id':
+ return token['user']['domain']['id']
+ elif attr == 'project_domain_id':
+ return token['project']['domain']['id']
+
+ def test_check_credentials_missing_attribute(self):
+ # reset credentials to fresh ones
+ self.credentials.reset()
+ for attr in ['username', 'password', 'user_domain_name',
+ 'project_domain_name']:
+ cred = copy.copy(self.credentials)
+ del cred[attr]
+ self.assertFalse(self.auth_provider.check_credentials(cred),
+ "Credentials should be invalid without %s" % attr)
+
+ def test_check_domain_credentials_missing_attribute(self):
+ # reset credentials to fresh ones
+ self.credentials.reset()
+ domain_creds = fake_credentials.FakeKeystoneV3DomainCredentials()
+ for attr in ['username', 'password', 'user_domain_name']:
+ cred = copy.copy(domain_creds)
+ del cred[attr]
+ self.assertFalse(self.auth_provider.check_credentials(cred),
+ "Credentials should be invalid without %s" % attr)
+
+ def test_fill_credentials(self):
+ self.auth_provider.fill_credentials()
+ creds = self.auth_provider.credentials
+ for attr in ['user_id', 'project_id', 'user_domain_id',
+ 'project_domain_id']:
+ self.assertEqual(self._get_from_fake_identity(attr),
+ getattr(creds, attr))
# Overwrites v2 test
def test_base_url_to_get_admin_endpoint(self):
diff --git a/tempest/tests/test_credentials.py b/tempest/tests/test_credentials.py
new file mode 100644
index 0000000..9da5f92
--- /dev/null
+++ b/tempest/tests/test_credentials.py
@@ -0,0 +1,229 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from oslo.config import cfg
+
+from tempest import auth
+from tempest.common import http
+from tempest.common import tempest_fixtures as fixtures
+from tempest import config
+from tempest import exceptions
+from tempest.tests import base
+from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
+
+
+class CredentialsTests(base.TestCase):
+ attributes = {}
+ credentials_class = auth.Credentials
+
+ def _get_credentials(self, attributes=None):
+ if attributes is None:
+ attributes = self.attributes
+ return self.credentials_class(**attributes)
+
+ def setUp(self):
+ super(CredentialsTests, self).setUp()
+ self.fake_http = fake_http.fake_httplib2(return_type=200)
+ self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
+ self.useFixture(fake_config.ConfigFixture())
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def test_create(self):
+ creds = self._get_credentials()
+ self.assertEqual(self.attributes, creds._initial)
+
+ def test_create_invalid_attr(self):
+ self.assertRaises(exceptions.InvalidCredentials,
+ self._get_credentials,
+ attributes=dict(invalid='fake'))
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ self.assertRaises(NotImplementedError,
+ self.credentials_class.get_default,
+ credentials_type=ctype)
+
+ def test_invalid_default(self):
+ self.assertRaises(exceptions.InvalidCredentials,
+ auth.Credentials.get_default,
+ credentials_type='invalid_type')
+
+ def test_is_valid(self):
+ creds = self._get_credentials()
+ self.assertRaises(NotImplementedError, creds.is_valid)
+
+
+class KeystoneV2CredentialsTests(CredentialsTests):
+ attributes = {
+ 'username': 'fake_username',
+ 'password': 'fake_password',
+ 'tenant_name': 'fake_tenant_name'
+ }
+
+ identity_response = fake_identity._fake_v2_response
+ credentials_class = auth.KeystoneV2Credentials
+
+ def setUp(self):
+ super(KeystoneV2CredentialsTests, self).setUp()
+ self.stubs.Set(http.ClosingHttp, 'request', self.identity_response)
+ self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+
+ def _verify_credentials(self, credentials_class, filled=True,
+ creds_dict=None):
+
+ def _check(credentials):
+ # Check the right version of credentials has been returned
+ self.assertIsInstance(credentials, credentials_class)
+ # Check the id attributes are filled in
+ attributes = [x for x in credentials.ATTRIBUTES if (
+ '_id' in x and x != 'domain_id')]
+ for attr in attributes:
+ if filled:
+ self.assertIsNotNone(getattr(credentials, attr))
+ else:
+ self.assertIsNone(getattr(credentials, attr))
+
+ if creds_dict is None:
+ for ctype in auth.Credentials.TYPES:
+ creds = auth.get_default_credentials(credential_type=ctype,
+ fill_in=filled)
+ _check(creds)
+ else:
+ creds = auth.get_credentials(fill_in=filled, **creds_dict)
+ _check(creds)
+
+ def test_get_default_credentials(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ self._verify_credentials(credentials_class=self.credentials_class)
+
+ def test_get_credentials(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ self._verify_credentials(credentials_class=self.credentials_class,
+ creds_dict=self.attributes)
+
+ def test_get_credentials_not_filled(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ self._verify_credentials(credentials_class=self.credentials_class,
+ filled=False,
+ creds_dict=self.attributes)
+
+ def test_is_valid(self):
+ creds = self._get_credentials()
+ self.assertTrue(creds.is_valid())
+
+ def test_is_not_valid(self):
+ creds = self._get_credentials()
+ for attr in self.attributes.keys():
+ delattr(creds, attr)
+ self.assertFalse(creds.is_valid(),
+ "Credentials should be invalid without %s" % attr)
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ creds = self.credentials_class.get_default(credentials_type=ctype)
+ for attr in self.attributes.keys():
+ # Default configuration values related to credentials
+ # are defined as fake_* in fake_config.py
+ self.assertEqual(getattr(creds, attr), 'fake_' + attr)
+
+ def test_reset_all_attributes(self):
+ creds = self._get_credentials()
+ initial_creds = copy.deepcopy(creds)
+ set_attr = creds.__dict__.keys()
+ missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
+ # Set all unset attributes, then reset
+ for attr in missing_attr:
+ setattr(creds, attr, 'fake' + attr)
+ creds.reset()
+ # Check reset credentials are same as initial ones
+ self.assertEqual(creds, initial_creds)
+
+ def test_reset_single_attribute(self):
+ creds = self._get_credentials()
+ initial_creds = copy.deepcopy(creds)
+ set_attr = creds.__dict__.keys()
+ missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
+ # Set one unset attribute at a time, then reset
+ for attr in missing_attr:
+ setattr(creds, attr, 'fake' + attr)
+ creds.reset()
+ # Check reset credentials are same as initial ones
+ self.assertEqual(creds, initial_creds)
+
+
+class KeystoneV3CredentialsTests(KeystoneV2CredentialsTests):
+ attributes = {
+ 'username': 'fake_username',
+ 'password': 'fake_password',
+ 'project_name': 'fake_project_name',
+ 'user_domain_name': 'fake_domain_name'
+ }
+
+ credentials_class = auth.KeystoneV3Credentials
+ identity_response = fake_identity._fake_v3_response
+
+ def setUp(self):
+ super(KeystoneV3CredentialsTests, self).setUp()
+ # Additional config items reset by cfg fixture after each test
+ cfg.CONF.set_default('auth_version', 'v3', group='identity')
+ # Identity group items
+ for prefix in ['', 'alt_', 'admin_']:
+ cfg.CONF.set_default(prefix + 'domain_name', 'fake_domain_name',
+ group='identity')
+ # Compute Admin group items
+ cfg.CONF.set_default('domain_name', 'fake_domain_name',
+ group='compute-admin')
+
+ def test_default(self):
+ self.useFixture(fixtures.LockFixture('auth_version'))
+ for ctype in self.credentials_class.TYPES:
+ creds = self.credentials_class.get_default(credentials_type=ctype)
+ for attr in self.attributes.keys():
+ if attr == 'project_name':
+ config_value = 'fake_tenant_name'
+ elif attr == 'user_domain_name':
+ config_value = 'fake_domain_name'
+ else:
+ config_value = 'fake_' + attr
+ self.assertEqual(getattr(creds, attr), config_value)
+
+ def test_synced_attributes(self):
+ attributes = self.attributes
+ # Create V3 credentials with tenant instead of project, and user_domain
+ for attr in ['project_id', 'user_domain_id']:
+ attributes[attr] = 'fake_' + attr
+ creds = self._get_credentials(attributes)
+ self.assertEqual(creds.project_name, creds.tenant_name)
+ self.assertEqual(creds.project_id, creds.tenant_id)
+ self.assertEqual(creds.user_domain_name, creds.project_domain_name)
+ self.assertEqual(creds.user_domain_id, creds.project_domain_id)
+ # Replace user_domain with project_domain
+ del attributes['user_domain_name']
+ del attributes['user_domain_id']
+ del attributes['project_name']
+ del attributes['project_id']
+ for attr in ['project_domain_name', 'project_domain_id',
+ 'tenant_name', 'tenant_id']:
+ attributes[attr] = 'fake_' + attr
+ self.assertEqual(creds.tenant_name, creds.project_name)
+ self.assertEqual(creds.tenant_id, creds.project_id)
+ self.assertEqual(creds.project_domain_name, creds.user_domain_name)
+ self.assertEqual(creds.project_domain_id, creds.user_domain_id)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 804204a..6b678f7 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -14,13 +14,12 @@
import mock
-import testtools
-
from oslo.config import cfg
+from oslotest import mockpatch
+import testtools
from tempest import config
from tempest import exceptions
-from tempest.openstack.common.fixture import mockpatch
from tempest import test
from tempest.tests import base
from tempest.tests import fake_config
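The only change in this file is switching the mockpatch fixture import from the old oslo-incubator copy to oslotest; usage stays the same. A minimal standalone example (the test name and patched target are arbitrary illustrations):

    import os

    from oslotest import mockpatch
    import testtools


    class ExampleTest(testtools.TestCase):
        def test_patch_fixture(self):
            # Patch os.path.exists for the duration of the test; the fixture
            # is cleaned up automatically by useFixture().
            fixture = self.useFixture(
                mockpatch.Patch('os.path.exists', return_value=True))
            self.assertTrue(os.path.exists('/definitely/not/there'))
            fixture.mock.assert_called_once_with('/definitely/not/there')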
diff --git a/tempest/tests/test_glance_http.py b/tempest/tests/test_glance_http.py
new file mode 100644
index 0000000..bb2df43
--- /dev/null
+++ b/tempest/tests/test_glance_http.py
@@ -0,0 +1,200 @@
+# Copyright 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import httplib
+import json
+import mock
+import six
+import socket
+
+from tempest.common import glance_http
+from tempest import exceptions
+from tempest.openstack.common.fixture import mockpatch
+from tempest.tests import base
+from tempest.tests import fake_auth_provider
+from tempest.tests import fake_http
+
+
+class TestGlanceHTTPClient(base.TestCase):
+
+ def setUp(self):
+ super(TestGlanceHTTPClient, self).setUp()
+ self.fake_http = fake_http.fake_httplib2(return_type=200)
+ # NOTE(maurosr): using http here implies that we will be using httplib
+ # directly. With https glance_client would use an httpS version, but
+ # the real backend would still be httplib anyway, and since we mock it
+ # there is no reason to care.
+ self.endpoint = 'http://fake_url.com'
+ self.fake_auth = fake_auth_provider.FakeAuthProvider()
+
+ self.fake_auth.base_url = mock.MagicMock(return_value=self.endpoint)
+
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'request',
+ side_effect=self.fake_http.request(self.endpoint)[1]))
+ self.client = glance_http.HTTPClient(self.fake_auth, {})
+
+ def _set_response_fixture(self, header, status, resp_body):
+ resp = fake_http.fake_httplib(header, status=status,
+ body=six.StringIO(resp_body))
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'getresponse',
+ return_value=resp))
+ return resp
+
+ def test_json_request_without_content_type_header(self):
+ self._set_response_fixture({}, 200, 'fake_response_body')
+ resp, body = self.client.json_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertIsNone(body)
+
+ def test_json_request_with_xml_content_type_header(self):
+ self._set_response_fixture({'content-type': 'application/xml'},
+ 200, 'fake_response_body')
+ resp, body = self.client.json_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertIsNone(body)
+
+ def test_json_request_with_content_type_header(self):
+ self._set_response_fixture({'content-type': 'application/json'},
+ 200, 'fake_response_body')
+ resp, body = self.client.json_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertEqual('fake_response_body', body)
+
+ def test_json_request_fails_to_json_loads(self):
+ self._set_response_fixture({'content-type': 'application/json'},
+ 200, 'fake_response_body')
+ self.useFixture(mockpatch.PatchObject(json, 'loads',
+ side_effect=ValueError()))
+ resp, body = self.client.json_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertEqual(body, 'fake_response_body')
+
+ def test_json_request_socket_timeout(self):
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'request',
+ side_effect=socket.timeout()))
+ self.assertRaises(exceptions.TimeoutException,
+ self.client.json_request, 'GET', '/images')
+
+ def test_json_request_endpoint_not_found(self):
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'request',
+ side_effect=socket.gaierror()))
+ self.assertRaises(exceptions.EndpointNotFound,
+ self.client.json_request, 'GET', '/images')
+
+ def test_raw_request(self):
+ self._set_response_fixture({}, 200, 'fake_response_body')
+ resp, body = self.client.raw_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertEqual('fake_response_body', body.read())
+
+ def test_raw_request_with_response_chunked(self):
+ self._set_response_fixture({}, 200, 'fake_response_body')
+ self.useFixture(mockpatch.PatchObject(glance_http,
+ 'CHUNKSIZE', 1))
+ resp, body = self.client.raw_request('GET', '/images')
+ self.assertEqual(200, resp.status)
+ self.assertEqual('fake_response_body', body.read())
+
+ def test_raw_request_chunked(self):
+ self.useFixture(mockpatch.PatchObject(glance_http,
+ 'CHUNKSIZE', 1))
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'endheaders'))
+ self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
+ 'send'))
+
+ self._set_response_fixture({}, 200, 'fake_response_body')
+ req_body = six.StringIO('fake_request_body')
+ resp, body = self.client.raw_request('PUT', '/images', body=req_body)
+ self.assertEqual(200, resp.status)
+ self.assertEqual('fake_response_body', body.read())
+ httplib.HTTPConnection.send.assert_call_count(req_body.len)
+
+ def test_get_connection_class_for_https(self):
+ conn_class = self.client.get_connection_class('https')
+ self.assertEqual(glance_http.VerifiedHTTPSConnection, conn_class)
+
+ def test_get_connection_class_for_http(self):
+ conn_class = (self.client.get_connection_class('http'))
+ self.assertEqual(httplib.HTTPConnection, conn_class)
+
+ def test_get_connection_http(self):
+ self.assertTrue(isinstance(self.client.get_connection(),
+ httplib.HTTPConnection))
+
+ def test_get_connection_https(self):
+ endpoint = 'https://fake_url.com'
+ self.fake_auth.base_url = mock.MagicMock(return_value=endpoint)
+ self.client = glance_http.HTTPClient(self.fake_auth, {})
+ self.assertTrue(isinstance(self.client.get_connection(),
+ glance_http.VerifiedHTTPSConnection))
+
+ def test_get_connection_url_not_found(self):
+ self.useFixture(mockpatch.PatchObject(self.client, 'connection_class',
+ side_effect=httplib.InvalidURL()
+ ))
+ self.assertRaises(exceptions.EndpointNotFound,
+ self.client.get_connection)
+
+ def test_get_connection_kwargs_default_for_http(self):
+ kwargs = self.client.get_connection_kwargs('http')
+ self.assertEqual(600, kwargs['timeout'])
+ self.assertEqual(1, len(kwargs.keys()))
+
+ def test_get_connection_kwargs_set_timeout_for_http(self):
+ kwargs = self.client.get_connection_kwargs('http', timeout=10,
+ cacert='foo')
+ self.assertEqual(10, kwargs['timeout'])
+ # nothing more than timeout is evaluated for http connections
+ self.assertEqual(1, len(kwargs.keys()))
+
+ def test_get_connection_kwargs_default_for_https(self):
+ kwargs = self.client.get_connection_kwargs('https')
+ self.assertEqual(600, kwargs['timeout'])
+ self.assertEqual(None, kwargs['cacert'])
+ self.assertEqual(None, kwargs['cert_file'])
+ self.assertEqual(None, kwargs['key_file'])
+ self.assertEqual(False, kwargs['insecure'])
+ self.assertEqual(True, kwargs['ssl_compression'])
+ self.assertEqual(6, len(kwargs.keys()))
+
+ def test_get_connection_kwargs_set_params_for_https(self):
+ kwargs = self.client.get_connection_kwargs('https', timeout=10,
+ cacert='foo',
+ cert_file='/foo/bar.cert',
+ key_file='/foo/key.pem',
+ insecure=True,
+ ssl_compression=False)
+ self.assertEqual(10, kwargs['timeout'])
+ self.assertEqual('foo', kwargs['cacert'])
+ self.assertEqual('/foo/bar.cert', kwargs['cert_file'])
+ self.assertEqual('/foo/key.pem', kwargs['key_file'])
+ self.assertEqual(True, kwargs['insecure'])
+ self.assertEqual(False, kwargs['ssl_compression'])
+ self.assertEqual(6, len(kwargs.keys()))
+
+
+class TestResponseBodyIterator(base.TestCase):
+
+ def test_iter_default_chunk_size_64k(self):
+ resp = fake_http.fake_httplib({}, six.StringIO(
+ 'X' * (glance_http.CHUNKSIZE + 1)))
+ iterator = glance_http.ResponseBodyIterator(resp)
+ chunks = list(iterator)
+ self.assertEqual(chunks, ['X' * glance_http.CHUNKSIZE, 'X'])
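The TestResponseBodyIterator case above boils down to a simple chunked-read pattern. A standalone sketch of that pattern (assumed shape only, not the real glance_http.ResponseBodyIterator):

    import six

    CHUNKSIZE = 64 * 1024


    class BodyIterator(object):
        """Yield a response body in fixed-size chunks."""

        def __init__(self, resp):
            self._resp = resp

        def __iter__(self):
            while True:
                chunk = self._resp.read(CHUNKSIZE)
                if not chunk:
                    return
                yield chunk


    body = six.StringIO('X' * (CHUNKSIZE + 1))
    chunks = list(BodyIterator(body))
    assert chunks == ['X' * CHUNKSIZE, 'X']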
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
new file mode 100644
index 0000000..ab81836
--- /dev/null
+++ b/tempest/tests/test_hacking.py
@@ -0,0 +1,95 @@
+# Copyright 2014 Matthew Treinish
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.hacking import checks
+from tempest.tests import base
+
+
+class HackingTestCase(base.TestCase):
+ """
+ This class tests the hacking checks in tempest.hacking.checks by passing
+ strings to the check methods like the pep8/flake8 parser would. The parser
+ loops over each line in the file and then passes the parameters to the
+ check method. The parameter names in the check method dictate what type of
+ object is passed to the check method. The parameter types are::
+
+ logical_line: A processed line with the following modifications:
+ - Multi-line statements converted to a single line.
+ - Stripped left and right.
+ - Contents of strings replaced with "xxx" of same length.
+ - Comments removed.
+ physical_line: Raw line of text from the input file.
+ lines: a list of the raw lines from the input file
+ tokens: the tokens that contribute to this logical line
+ line_number: line number in the input file
+ total_lines: number of lines in the input file
+ blank_lines: blank lines before this one
+ indent_char: indentation character in this file (" " or "\t")
+ indent_level: indentation (with tabs expanded to multiples of 8)
+ previous_indent_level: indentation on previous line
+ previous_logical: previous logical line
+ filename: Path of the file being run through pep8
+
+ When running a test on a check method the return will be False/None if
+ there is no violation in the sample input. If there is a violation, a tuple
+ is returned with a position in the line and a message. So to check the
+ result, just assertTrue if the check is expected to fail and assertFalse
+ if it should pass.
+ """
+ def test_no_setupclass_for_unit_tests(self):
+ self.assertTrue(checks.no_setupclass_for_unit_tests(
+ " def setUpClass(cls):", './tempest/tests/fake_test.py'))
+ self.assertIsNone(checks.no_setupclass_for_unit_tests(
+ " def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
+ self.assertFalse(checks.no_setupclass_for_unit_tests(
+ " def setUpClass(cls):", './tempest/api/fake_test.py'))
+
+ def test_import_no_clients_in_api(self):
+ for client in checks.PYTHON_CLIENTS:
+ string = "import " + client + "client"
+ self.assertTrue(checks.import_no_clients_in_api(
+ string, './tempest/api/fake_test.py'))
+ self.assertFalse(checks.import_no_clients_in_api(
+ string, './tempest/scenario/fake_test.py'))
+
+ def test_scenario_tests_need_service_tags(self):
+ self.assertFalse(checks.scenario_tests_need_service_tags(
+ 'def test_fake:', './tempest/scenario/test_fake.py',
+ "@test.services('compute')"))
+ self.assertFalse(checks.scenario_tests_need_service_tags(
+ 'def test_fake_test:', './tempest/api/compute/test_fake.py',
+ "@test.services('image')"))
+ self.assertTrue(checks.scenario_tests_need_service_tags(
+ 'def test_fake_test:', './tempest/scenario/test_fake.py',
+ '\n'))
+
+ def test_no_vi_headers(self):
+ # NOTE(mtreinish) The lines parameter is used only for finding the
+ # line location in the file. So these tests just pass a list of an
+ # arbitrary length to use for verifying the check function.
+ self.assertTrue(checks.no_vi_headers(
+ '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 1, range(250)))
+ self.assertTrue(checks.no_vi_headers(
+ '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 249, range(250)))
+ self.assertFalse(checks.no_vi_headers(
+ '# vim: tabstop=4 shiftwidth=4 softtabstop=4', 149, range(250)))
+
+ def test_service_tags_not_in_module_path(self):
+ self.assertTrue(checks.service_tags_not_in_module_path(
+ "@test.services('compute')", './tempest/api/compute/fake_test.py'))
+ self.assertFalse(checks.service_tags_not_in_module_path(
+ "@test.services('compute')",
+ './tempest/scenario/compute/fake_test.py'))
+ self.assertFalse(checks.service_tags_not_in_module_path(
+ "@test.services('compute')", './tempest/api/image/fake_test.py'))
diff --git a/tempest/tests/test_rest_client.py b/tempest/tests/test_rest_client.py
index 64ad3bc..d20520c 100644
--- a/tempest/tests/test_rest_client.py
+++ b/tempest/tests/test_rest_client.py
@@ -15,11 +15,12 @@
import httplib2
import json
+from oslotest import mockpatch
+
from tempest.common import rest_client
from tempest.common import xml_utils as xml
from tempest import config
from tempest import exceptions
-from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
from tempest.tests import fake_auth_provider
from tempest.tests import fake_config
diff --git a/tempest/tests/test_ssh.py b/tempest/tests/test_ssh.py
index a6eedc4..0da52dc 100644
--- a/tempest/tests/test_ssh.py
+++ b/tempest/tests/test_ssh.py
@@ -14,6 +14,7 @@
import contextlib
import socket
+import time
import mock
import testtools
@@ -43,25 +44,21 @@
rsa_mock.assert_not_called()
cs_mock.assert_not_called()
- def test_get_ssh_connection(self):
- c_mock = self.patch('paramiko.SSHClient')
- aa_mock = self.patch('paramiko.AutoAddPolicy')
- s_mock = self.patch('time.sleep')
- t_mock = self.patch('time.time')
+ def _set_ssh_connection_mocks(self):
+ client_mock = mock.MagicMock()
+ client_mock.connect.return_value = True
+ return (self.patch('paramiko.SSHClient'),
+ self.patch('paramiko.AutoAddPolicy'),
+ client_mock)
+ def test_get_ssh_connection(self):
+ c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
+ s_mock = self.patch('time.sleep')
+
+ c_mock.return_value = client_mock
aa_mock.return_value = mock.sentinel.aa
- def reset_mocks():
- aa_mock.reset_mock()
- c_mock.reset_mock()
- s_mock.reset_mock()
- t_mock.reset_mock()
-
# Test normal case for successful connection on first try
- client_mock = mock.MagicMock()
- c_mock.return_value = client_mock
- client_mock.connect.return_value = True
-
client = ssh.Client('localhost', 'root', timeout=2)
client._get_ssh_connection(sleep=1)
@@ -79,50 +76,40 @@
)]
self.assertEqual(expected_connect, client_mock.connect.mock_calls)
s_mock.assert_not_called()
- t_mock.assert_called_once_with()
- reset_mocks()
+ def test_get_ssh_connection_two_attempts(self):
+ c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
- # Test case when connection fails on first two tries and
- # succeeds on third try (this validates retry logic)
- client_mock.connect.side_effect = [socket.error, socket.error, True]
- t_mock.side_effect = [
- 1000, # Start time
- 1000, # LOG.warning() calls time.time() loop 1
- 1001, # Sleep loop 1
- 1001, # LOG.warning() calls time.time() loop 2
- 1002 # Sleep loop 2
+ c_mock.return_value = client_mock
+ client_mock.connect.side_effect = [
+ socket.error,
+ mock.MagicMock()
]
+ client = ssh.Client('localhost', 'root', timeout=1)
+ start_time = int(time.time())
client._get_ssh_connection(sleep=1)
+ end_time = int(time.time())
+ self.assertTrue((end_time - start_time) < 3)
+ self.assertTrue((end_time - start_time) > 1)
- expected_sleeps = [
- mock.call(2),
- mock.call(3)
- ]
- self.assertEqual(expected_sleeps, s_mock.mock_calls)
+ def test_get_ssh_connection_timeout(self):
+ c_mock, aa_mock, client_mock = self._set_ssh_connection_mocks()
- reset_mocks()
-
- # Test case when connection fails on first three tries and
- # exceeds the timeout, so expect to raise a Timeout exception
+ c_mock.return_value = client_mock
client_mock.connect.side_effect = [
socket.error,
socket.error,
- socket.error
- ]
- t_mock.side_effect = [
- 1000, # Start time
- 1000, # LOG.warning() calls time.time() loop 1
- 1001, # Sleep loop 1
- 1001, # LOG.warning() calls time.time() loop 2
- 1002, # Sleep loop 2
- 1003, # Sleep loop 3
- 1004 # LOG.error() calls time.time()
+ socket.error,
]
+ client = ssh.Client('localhost', 'root', timeout=2)
+ start_time = int(time.time())
with testtools.ExpectedException(exceptions.SSHTimeout):
client._get_ssh_connection()
+ end_time = int(time.time())
+ self.assertTrue((end_time - start_time) < 4)
+ self.assertTrue((end_time - start_time) >= 2)
def test_exec_command(self):
gsc_mock = self.patch('tempest.common.ssh.Client._get_ssh_connection')
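The two new connection tests measure wall-clock time instead of mocking time.time(); the retry loop they exercise looks roughly like the following (names are assumptions, not the tempest ssh.Client code):

    import socket
    import time


    def connect_with_retry(connect, timeout=2, sleep=1):
        """Keep calling connect() until it succeeds or the timeout expires."""
        start = time.time()
        attempts = 0
        while True:
            try:
                return connect()
            except socket.error:
                attempts += 1
                if time.time() - start > timeout:
                    raise RuntimeError('gave up after %d attempts' % attempts)
                time.sleep(sleep)

With a callable that fails once and then succeeds, this returns after roughly one sleep interval, which matches the 1-3 second window asserted above; with persistent failures it gives up once the timeout elapses.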
diff --git a/tempest/tests/test_tenant_isolation.py b/tempest/tests/test_tenant_isolation.py
index ae2e57d..7a9b6be 100644
--- a/tempest/tests/test_tenant_isolation.py
+++ b/tempest/tests/test_tenant_isolation.py
@@ -13,10 +13,12 @@
# under the License.
import keystoneclient.v2_0.client as keystoneclient
-from mock import patch
+import mock
import neutronclient.v2_0.client as neutronclient
from oslo.config import cfg
+from tempest import clients
+from tempest.common import http
from tempest.common import isolated_creds
from tempest import config
from tempest import exceptions
@@ -27,6 +29,8 @@
from tempest.services.network.xml import network_client as xml_network_client
from tempest.tests import base
from tempest.tests import fake_config
+from tempest.tests import fake_http
+from tempest.tests import fake_identity
class TestTenantIsolation(base.TestCase):
@@ -35,6 +39,9 @@
super(TestTenantIsolation, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
+ self.fake_http = fake_http.fake_httplib2(return_type=200)
+ self.stubs.Set(http.ClosingHttp, 'request',
+ fake_identity._fake_v2_response)
def test_tempest_client(self):
iso_creds = isolated_creds.IsolatedCreds('test class')
@@ -46,6 +53,12 @@
def test_official_client(self):
self.useFixture(mockpatch.PatchObject(keystoneclient.Client,
'authenticate'))
+ self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
+ '_get_image_client'))
+ self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
+ '_get_object_storage_client'))
+ self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
+ '_get_orchestration_client'))
iso_creds = isolated_creds.IsolatedCreds('test class',
tempest_client=False)
self.assertTrue(isinstance(iso_creds.identity_admin_client,
@@ -101,23 +114,21 @@
{'router': {'id': id, 'name': name}})))
return router_fix
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_primary_creds(self, MockRestClient):
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_user_create('1234', 'fake_prim_user')
- username, tenant_name, password = iso_creds.get_primary_creds()
- self.assertEqual(username, 'fake_prim_user')
- self.assertEqual(tenant_name, 'fake_prim_tenant')
- # Verify helper methods
- tenant = iso_creds.get_primary_tenant()
- user = iso_creds.get_primary_user()
- self.assertEqual(tenant['id'], '1234')
- self.assertEqual(user['id'], '1234')
+ primary_creds = iso_creds.get_primary_creds()
+ self.assertEqual(primary_creds.username, 'fake_prim_user')
+ self.assertEqual(primary_creds.tenant_name, 'fake_prim_tenant')
+ # Verify IDs
+ self.assertEqual(primary_creds.tenant_id, '1234')
+ self.assertEqual(primary_creds.user_id, '1234')
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_admin_creds(self, MockRestClient):
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
@@ -130,35 +141,33 @@
return_value=({'status': 200},
[{'id': '1234', 'name': 'admin'}])))
- user_mock = patch.object(json_iden_client.IdentityClientJSON,
- 'assign_user_role')
+ user_mock = mock.patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role')
user_mock.start()
self.addCleanup(user_mock.stop)
- with patch.object(json_iden_client.IdentityClientJSON,
- 'assign_user_role') as user_mock:
- username, tenant_name, password = iso_creds.get_admin_creds()
+ with mock.patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role') as user_mock:
+ admin_creds = iso_creds.get_admin_creds()
user_mock.assert_called_once_with('1234', '1234', '1234')
- self.assertEqual(username, 'fake_admin_user')
- self.assertEqual(tenant_name, 'fake_admin_tenant')
- # Verify helper methods
- tenant = iso_creds.get_admin_tenant()
- user = iso_creds.get_admin_user()
- self.assertEqual(tenant['id'], '1234')
- self.assertEqual(user['id'], '1234')
+ self.assertEqual(admin_creds.username, 'fake_admin_user')
+ self.assertEqual(admin_creds.tenant_name, 'fake_admin_tenant')
+ # Verify IDs
+ self.assertEqual(admin_creds.tenant_id, '1234')
+ self.assertEqual(admin_creds.user_id, '1234')
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_all_cred_cleanup(self, MockRestClient):
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
tenant_fix = self._mock_tenant_create('1234', 'fake_prim_tenant')
user_fix = self._mock_user_create('1234', 'fake_prim_user')
- username, tenant_name, password = iso_creds.get_primary_creds()
+ iso_creds.get_primary_creds()
tenant_fix.cleanUp()
user_fix.cleanUp()
tenant_fix = self._mock_tenant_create('12345', 'fake_alt_tenant')
user_fix = self._mock_user_create('12345', 'fake_alt_user')
- alt_username, alt_tenant, alt_password = iso_creds.get_alt_creds()
+ iso_creds.get_alt_creds()
tenant_fix.cleanUp()
user_fix.cleanUp()
tenant_fix = self._mock_tenant_create('123456', 'fake_admin_tenant')
@@ -168,10 +177,9 @@
'list_roles',
return_value=({'status': 200},
[{'id': '123456', 'name': 'admin'}])))
- with patch.object(json_iden_client.IdentityClientJSON,
- 'assign_user_role'):
- admin_username, admin_tenant, admin_pass = \
- iso_creds.get_admin_creds()
+ with mock.patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role'):
+ iso_creds.get_admin_creds()
user_mock = self.patch(
'tempest.services.identity.json.identity_client.'
'IdentityClientJSON.delete_user')
@@ -194,23 +202,21 @@
self.assertIn('12345', args)
self.assertIn('123456', args)
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_alt_creds(self, MockRestClient):
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
self._mock_user_create('1234', 'fake_alt_user')
self._mock_tenant_create('1234', 'fake_alt_tenant')
- username, tenant_name, password = iso_creds.get_alt_creds()
- self.assertEqual(username, 'fake_alt_user')
- self.assertEqual(tenant_name, 'fake_alt_tenant')
- # Verify helper methods
- tenant = iso_creds.get_alt_tenant()
- user = iso_creds.get_alt_user()
- self.assertEqual(tenant['id'], '1234')
- self.assertEqual(user['id'], '1234')
+ alt_creds = iso_creds.get_alt_creds()
+ self.assertEqual(alt_creds.username, 'fake_alt_user')
+ self.assertEqual(alt_creds.tenant_name, 'fake_alt_tenant')
+ # Verify IDs
+ self.assertEqual(alt_creds.tenant_id, '1234')
+ self.assertEqual(alt_creds.user_id, '1234')
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_network_creation(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
@@ -222,7 +228,7 @@
router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'add_router_interface_with_subnet_id')
- username, tenant_name, password = iso_creds.get_primary_creds()
+ iso_creds.get_primary_creds()
router_interface_mock.called_once_with('1234', '1234')
network = iso_creds.get_primary_network()
subnet = iso_creds.get_primary_subnet()
@@ -234,7 +240,7 @@
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_router')
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_network_cleanup(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
@@ -247,7 +253,7 @@
router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'add_router_interface_with_subnet_id')
- username, tenant_name, password = iso_creds.get_primary_creds()
+ iso_creds.get_primary_creds()
router_interface_mock.called_once_with('1234', '1234')
router_interface_mock.reset_mock()
tenant_fix.cleanUp()
@@ -262,7 +268,7 @@
subnet_fix = self._mock_subnet_create(iso_creds, '12345',
'fake_alt_subnet')
router_fix = self._mock_router_create('12345', 'fake_alt_router')
- alt_username, alt_tenant_name, password = iso_creds.get_alt_creds()
+ iso_creds.get_alt_creds()
router_interface_mock.called_once_with('12345', '12345')
router_interface_mock.reset_mock()
tenant_fix.cleanUp()
@@ -283,28 +289,28 @@
'list_roles',
return_value=({'status': 200},
[{'id': '123456', 'name': 'admin'}])))
- with patch.object(json_iden_client.IdentityClientJSON,
- 'assign_user_role'):
- admin_user, admin_tenant, password = iso_creds.get_admin_creds()
+ with mock.patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role'):
+ iso_creds.get_admin_creds()
self.patch('tempest.services.identity.json.identity_client.'
'IdentityClientJSON.delete_user')
self.patch('tempest.services.identity.json.identity_client.'
'IdentityClientJSON.delete_tenant')
- net = patch.object(iso_creds.network_admin_client,
- 'delete_network')
+ net = mock.patch.object(iso_creds.network_admin_client,
+ 'delete_network')
net_mock = net.start()
- subnet = patch.object(iso_creds.network_admin_client,
- 'delete_subnet')
+ subnet = mock.patch.object(iso_creds.network_admin_client,
+ 'delete_subnet')
subnet_mock = subnet.start()
- router = patch.object(iso_creds.network_admin_client,
- 'delete_router')
+ router = mock.patch.object(iso_creds.network_admin_client,
+ 'delete_router')
router_mock = router.start()
remove_router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'remove_router_interface_with_subnet_id')
- port_list_mock = patch.object(iso_creds.network_admin_client,
- 'list_ports', return_value=(
- {'status': 200}, {'ports': []}))
+ port_list_mock = mock.patch.object(iso_creds.network_admin_client,
+ 'list_ports', return_value=(
+ {'status': 200}, {'ports': []}))
port_list_mock.start()
iso_creds.clear_isolated_creds()
# Verify remove router interface calls
@@ -336,7 +342,7 @@
self.assertIn('12345', args)
self.assertIn('123456', args)
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_network_alt_creation(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
@@ -348,7 +354,7 @@
router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'add_router_interface_with_subnet_id')
- username, tenant_name, password = iso_creds.get_alt_creds()
+ iso_creds.get_alt_creds()
router_interface_mock.called_once_with('1234', '1234')
network = iso_creds.get_alt_network()
subnet = iso_creds.get_alt_subnet()
@@ -360,7 +366,7 @@
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_alt_router')
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_network_admin_creation(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
@@ -377,9 +383,9 @@
'list_roles',
return_value=({'status': 200},
[{'id': '123456', 'name': 'admin'}])))
- with patch.object(json_iden_client.IdentityClientJSON,
- 'assign_user_role'):
- username, tenant_name, password = iso_creds.get_admin_creds()
+ with mock.patch.object(json_iden_client.IdentityClientJSON,
+ 'assign_user_role'):
+ iso_creds.get_admin_creds()
router_interface_mock.called_once_with('1234', '1234')
network = iso_creds.get_admin_network()
subnet = iso_creds.get_admin_subnet()
@@ -391,7 +397,7 @@
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_admin_router')
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_no_network_resources(self, MockRestClient):
net_dict = {
'network': False,
@@ -404,17 +410,17 @@
network_resources=net_dict)
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
- net = patch.object(iso_creds.network_admin_client,
- 'delete_network')
+ net = mock.patch.object(iso_creds.network_admin_client,
+ 'delete_network')
net_mock = net.start()
- subnet = patch.object(iso_creds.network_admin_client,
- 'delete_subnet')
+ subnet = mock.patch.object(iso_creds.network_admin_client,
+ 'delete_subnet')
subnet_mock = subnet.start()
- router = patch.object(iso_creds.network_admin_client,
- 'delete_router')
+ router = mock.patch.object(iso_creds.network_admin_client,
+ 'delete_router')
router_mock = router.start()
- username, tenant_name, password = iso_creds.get_primary_creds()
+ iso_creds.get_primary_creds()
self.assertEqual(net_mock.mock_calls, [])
self.assertEqual(subnet_mock.mock_calls, [])
self.assertEqual(router_mock.mock_calls, [])
@@ -425,7 +431,7 @@
self.assertIsNone(subnet)
self.assertIsNone(router)
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_router_without_network(self, MockRestClient):
net_dict = {
'network': False,
@@ -441,7 +447,7 @@
self.assertRaises(exceptions.InvalidConfiguration,
iso_creds.get_primary_creds)
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_subnet_without_network(self, MockRestClient):
net_dict = {
'network': False,
@@ -457,7 +463,7 @@
self.assertRaises(exceptions.InvalidConfiguration,
iso_creds.get_primary_creds)
- @patch('tempest.common.rest_client.RestClient')
+ @mock.patch('tempest.common.rest_client.RestClient')
def test_dhcp_without_subnet(self, MockRestClient):
net_dict = {
'network': False,
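The rewritten assertions in this file follow the isolated_creds API change from a (username, tenant_name, password) tuple to a credentials object. A minimal stand-in showing the attribute-style access the tests now expect (hypothetical class, illustration only):

    class FakeCreds(object):
        def __init__(self, username, tenant_name, password,
                     user_id=None, tenant_id=None):
            self.username = username
            self.tenant_name = tenant_name
            self.password = password
            self.user_id = user_id
            self.tenant_id = tenant_id


    # Old call sites unpacked a tuple; new ones read attributes directly.
    primary = FakeCreds('fake_prim_user', 'fake_prim_tenant', 'fake_password',
                        user_id='1234', tenant_id='1234')
    assert primary.tenant_id == '1234'
    assert primary.user_id == '1234'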
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
index f6ed445..bba4012 100644
--- a/tempest/tests/test_wrappers.py
+++ b/tempest/tests/test_wrappers.py
@@ -14,6 +14,7 @@
import os
import shutil
+import StringIO
import subprocess
import tempfile
@@ -33,6 +34,7 @@
# Setup Test files
self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
+ self.subunit_trace = os.path.join(self.directory, 'subunit-trace.py')
self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
self.init_file = os.path.join(self.test_dir, '__init__.py')
@@ -43,55 +45,48 @@
shutil.copy('setup.py', self.setup_py)
shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
shutil.copy('tempest/tests/files/__init__.py', self.init_file)
+ shutil.copy('tools/subunit-trace.py', self.subunit_trace)
+ # copy over the pretty_tox scripts
+ shutil.copy('tools/pretty_tox.sh',
+ os.path.join(self.directory, 'pretty_tox.sh'))
+ shutil.copy('tools/pretty_tox_serial.sh',
+ os.path.join(self.directory, 'pretty_tox_serial.sh'))
+
+ self.stdout = StringIO.StringIO()
+ self.stderr = StringIO.StringIO()
+ # Change directory, run wrapper and check result
+ self.addCleanup(os.chdir, os.path.abspath(os.curdir))
+ os.chdir(self.directory)
+
+ def assertRunExit(self, cmd, expected):
+ p = subprocess.Popen(
+ "bash %s" % cmd, shell=True,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ # wait() in the general case is dangerous; however, the amount of
+ # data coming back on these pipes is small enough that it shouldn't
+ # be a problem.
+ p.wait()
+
+ self.assertEqual(
+ p.returncode, expected,
+ "Stdout: %s; Stderr: %s" % (p.stdout, p.stderr))
def test_pretty_tox(self):
- # Copy wrapper script and requirements:
- pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
- shutil.copy('tools/pretty_tox.sh', pretty_tox)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
# Git init is required for the pbr testr command. pbr requires a git
# version or an sdist to work. so make the test directory a git repo
# too.
- subprocess.call(['git', 'init'])
- exit_code = subprocess.call('bash pretty_tox.sh tests.passing',
- shell=True, stdout=DEVNULL, stderr=DEVNULL)
- self.assertEqual(exit_code, 0)
+ subprocess.call(['git', 'init'], stderr=DEVNULL)
+ self.assertRunExit('pretty_tox.sh tests.passing', 0)
def test_pretty_tox_fails(self):
- # Copy wrapper script and requirements:
- pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
- shutil.copy('tools/pretty_tox.sh', pretty_tox)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
# Git init is required for the pbr testr command. pbr requires a git
# version or an sdist to work. so make the test directory a git repo
# too.
- subprocess.call(['git', 'init'])
- exit_code = subprocess.call('bash pretty_tox.sh', shell=True,
- stdout=DEVNULL, stderr=DEVNULL)
- self.assertEqual(exit_code, 1)
+ subprocess.call(['git', 'init'], stderr=DEVNULL)
+ self.assertRunExit('pretty_tox.sh', 1)
def test_pretty_tox_serial(self):
- # Copy wrapper script and requirements:
- pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
- shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
- exit_code = subprocess.call('bash pretty_tox_serial.sh tests.passing',
- shell=True, stdout=DEVNULL, stderr=DEVNULL)
- self.assertEqual(exit_code, 0)
+ self.assertRunExit('pretty_tox_serial.sh tests.passing', 0)
def test_pretty_tox_serial_fails(self):
- # Copy wrapper script and requirements:
- pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
- shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
- # Change directory, run wrapper and check result
- self.addCleanup(os.chdir, os.path.abspath(os.curdir))
- os.chdir(self.directory)
- exit_code = subprocess.call('bash pretty_tox_serial.sh', shell=True,
- stdout=DEVNULL, stderr=DEVNULL)
- self.assertEqual(exit_code, 1)
+ self.assertRunExit('pretty_tox_serial.sh', 1)
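assertRunExit centralises the exit-status check the individual tests used to repeat. The underlying pattern, stripped to a standalone illustration (the command here is arbitrary):

    import subprocess

    # communicate() drains both pipes before checking the status, which avoids
    # the (small) deadlock risk that a bare wait() carries.
    p = subprocess.Popen('bash -c "exit 1"', shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    assert p.returncode == 1, "Stdout: %s; Stderr: %s" % (out, err)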
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index 33b8d6e..b2eb18d 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -193,7 +193,6 @@
instance.terminate()
self.cancelResourceCleanUp(rcuk)
- @test.skip_because(bug="1098891")
@test.attr(type='smoke')
def test_run_terminate_instance(self):
# EC2 run, terminate immediately
@@ -211,7 +210,7 @@
pass
except exception.EC2ResponseError as exc:
if self.ec2_error_code.\
- client.InvalidInstanceID.NotFound.match(exc):
+ client.InvalidInstanceID.NotFound.match(exc) is None:
pass
else:
raise
diff --git a/test-requirements.txt b/test-requirements.txt
index 8d64167..b9c75c8 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,9 +1,10 @@
hacking>=0.8.0,<0.9
# needed for doc build
docutils==0.9.1
-sphinx>=1.1.2,<1.2
+sphinx>=1.2.1,<1.3
python-subunit>=0.0.18
oslosphinx
mox>=0.5.3
mock>=1.0
coverage>=3.6
+oslotest
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index 46822e3..743b59d 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -101,7 +101,6 @@
print('done.')
else:
print("venv already exists...")
- pass
def pip_install(self, *args):
self.run_command(['tools/with_venv.sh',
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
index 07c35a0..0a04ce6 100755
--- a/tools/pretty_tox.sh
+++ b/tools/pretty_tox.sh
@@ -3,4 +3,4 @@
set -o pipefail
TESTRARGS=$1
-python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit2pyunit
+python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | $(dirname $0)/subunit-trace.py --no-failure-debug -f
diff --git a/tools/pretty_tox_serial.sh b/tools/pretty_tox_serial.sh
index 42ce760..db70890 100755
--- a/tools/pretty_tox_serial.sh
+++ b/tools/pretty_tox_serial.sh
@@ -7,7 +7,8 @@
if [ ! -d .testrepository ]; then
testr init
fi
-testr run --subunit $TESTRARGS | subunit2pyunit
+testr run --subunit $TESTRARGS | $(dirname $0)/subunit-trace.py -f -n
retval=$?
testr slowest
+
exit $retval
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
new file mode 100755
index 0000000..9bfefe1
--- /dev/null
+++ b/tools/subunit-trace.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# Copyright 2014 Samsung Electronics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Trace a subunit stream in reasonable detail and high accuracy."""
+
+import argparse
+import functools
+import re
+import sys
+
+import mimeparse
+import subunit
+import testtools
+
+DAY_SECONDS = 60 * 60 * 24
+FAILS = []
+RESULTS = {}
+
+
+class Starts(testtools.StreamResult):
+
+ def __init__(self, output):
+ super(Starts, self).__init__()
+ self._output = output
+
+ def startTestRun(self):
+ self._neednewline = False
+ self._emitted = set()
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ super(Starts, self).status(
+ test_id, test_status,
+ test_tags=test_tags, runnable=runnable, file_name=file_name,
+ file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+ route_code=route_code, timestamp=timestamp)
+ if not test_id:
+ if not file_bytes:
+ return
+ if not mime_type or mime_type == 'test/plain;charset=utf8':
+ mime_type = 'text/plain; charset=utf-8'
+ primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
+ content_type = testtools.content_type.ContentType(
+ primary, sub, parameters)
+ content = testtools.content.Content(
+ content_type, lambda: [file_bytes])
+ text = content.as_text()
+ if text and text[-1] not in '\r\n':
+ self._neednewline = True
+ self._output.write(text)
+ elif test_status == 'inprogress' and test_id not in self._emitted:
+ if self._neednewline:
+ self._neednewline = False
+ self._output.write('\n')
+ worker = ''
+ for tag in test_tags or ():
+ if tag.startswith('worker-'):
+ worker = '(' + tag[7:] + ') '
+ if timestamp:
+ timestr = timestamp.isoformat()
+ else:
+ timestr = ''
+ self._output.write('%s: %s%s [start]\n' %
+ (timestr, worker, test_id))
+ self._emitted.add(test_id)
+
+
+def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
+ """Clean up the test name for display.
+
+ By default we strip out the tags in the test because they don't help us
+ in matching the test that was run to its result.
+
+ Make it possible to strip out the testscenarios information (not to
+ be confused with tempest scenarios); however, that's often needed to
+ identify generated negative tests.
+ """
+ if strip_tags:
+ tags_start = name.find('[')
+ tags_end = name.find(']')
+ if tags_start > 0 and tags_end > tags_start:
+ newname = name[:tags_start]
+ newname += name[tags_end + 1:]
+ name = newname
+
+ if strip_scenarios:
+ tags_start = name.find('(')
+ tags_end = name.find(')')
+ if tags_start > 0 and tags_end > tags_start:
+ newname = name[:tags_start]
+ newname += name[tags_end + 1:]
+ name = newname
+
+ return name
+
+
+def get_duration(timestamps):
+ start, end = timestamps
+ if not start or not end:
+ duration = ''
+ else:
+ delta = end - start
+ duration = '%d.%06ds' % (
+ delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
+ return duration
+
+
+def find_worker(test):
+ for tag in test['tags']:
+ if tag.startswith('worker-'):
+ return int(tag[7:])
+ return 'NaN'
+
+
+# Print out stdout/stderr if it exists, always
+def print_attachments(stream, test, all_channels=False):
+ """Print out subunit attachments.
+
+ Print out subunit attachments that contain content. This
+ runs in 2 modes, one for successes where we print out just stdout
+ and stderr, and an override that dumps all the attachments.
+ """
+ channels = ('stdout', 'stderr')
+ for name, detail in test['details'].items():
+ # NOTE(sdague): the subunit names are a little crazy, and actually
+ # are in the form pythonlogging:'' (with the colon and quotes)
+ name = name.split(':')[0]
+ if detail.content_type.type == 'test':
+ detail.content_type.type = 'text'
+ if (all_channels or name in channels) and detail.as_text():
+ title = "Captured %s:" % name
+ stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
+ # indent attachment lines 4 spaces to make them visually
+ # offset
+ for line in detail.as_text().split('\n'):
+ stream.write(" %s\n" % line)
+
+
+def show_outcome(stream, test, print_failures=False):
+ global RESULTS
+ status = test['status']
+ # TODO(sdague): ask lifeless why on this?
+ if status == 'exists':
+ return
+
+ worker = find_worker(test)
+ name = cleanup_test_name(test['id'])
+ duration = get_duration(test['timestamps'])
+
+ if worker not in RESULTS:
+ RESULTS[worker] = []
+ RESULTS[worker].append(test)
+
+ # don't count the process-returncode pseudo-test as a fail
+ if name == 'process-returncode':
+ return
+
+ if status == 'success':
+ stream.write('{%s} %s [%s] ... ok\n' % (
+ worker, name, duration))
+ print_attachments(stream, test)
+ elif status == 'fail':
+ FAILS.append(test)
+ stream.write('{%s} %s [%s] ... FAILED\n' % (
+ worker, name, duration))
+ if not print_failures:
+ print_attachments(stream, test, all_channels=True)
+ elif status == 'skip':
+ stream.write('{%s} %s ... SKIPPED: %s\n' % (
+ worker, name, test['details']['reason'].as_text()))
+ else:
+ stream.write('{%s} %s [%s] ... %s\n' % (
+ worker, name, duration, test['status']))
+ if not print_failures:
+ print_attachments(stream, test, all_channels=True)
+
+ stream.flush()
+
+
+def print_fails(stream):
+ """Print summary failure report.
+
+ Currently unused, however there remains debate on inline vs. at end
+ reporting, so leave the utility function for later use.
+ """
+ if not FAILS:
+ return
+ stream.write("\n==============================\n")
+ stream.write("Failed %s tests - output below:" % len(FAILS))
+ stream.write("\n==============================\n")
+ for f in FAILS:
+ stream.write("\n%s\n" % f['id'])
+ stream.write("%s\n" % ('-' * len(f['id'])))
+ print_attachments(stream, f, all_channels=True)
+ stream.write('\n')
+
+
+def count_tests(key, value):
+ count = 0
+ for k, v in RESULTS.items():
+ for item in v:
+ if key in item:
+ if re.search(value, item[key]):
+ count += 1
+ return count
+
+
+def worker_stats(worker):
+ tests = RESULTS[worker]
+ num_tests = len(tests)
+ delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
+ return num_tests, delta
+
+
+def print_summary(stream):
+ stream.write("\n======\nTotals\n======\n")
+ stream.write("Run: %s\n" % count_tests('status', '.*'))
+ stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
+ stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
+ stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
+
+ # we could have no results, especially as we filter out the process-codes
+ if RESULTS:
+ stream.write("\n==============\nWorker Balance\n==============\n")
+
+ for w in range(max(RESULTS.keys()) + 1):
+ if w not in RESULTS:
+ stream.write(
+ " - WARNING: missing Worker %s! "
+ "Race in testr accounting.\n" % w)
+ else:
+ num, time = worker_stats(w)
+ stream.write(" - Worker %s (%s tests) => %ss\n" %
+ (w, num, time))
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--no-failure-debug', '-n', action='store_true',
+ dest='print_failures', help='Disable printing failure '
+ 'debug information in real time')
+ parser.add_argument('--fails', '-f', action='store_true',
+ dest='post_fails', help='Print failure debug '
+ 'information after the stream is processed')
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+ stream = subunit.ByteStreamToStreamResult(
+ sys.stdin, non_subunit_name='stdout')
+ starts = Starts(sys.stdout)
+ outcomes = testtools.StreamToDict(
+ functools.partial(show_outcome, sys.stdout,
+ print_failures=args.print_failures))
+ summary = testtools.StreamSummary()
+ result = testtools.CopyStreamResult([starts, outcomes, summary])
+ result.startTestRun()
+ try:
+ stream.run(result)
+ finally:
+ result.stopTestRun()
+ if args.post_fails:
+ print_fails(sys.stdout)
+ print_summary(sys.stdout)
+ return (0 if summary.wasSuccessful() else 1)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
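To make the tallying in count_tests() concrete, here is the same regex-count logic applied to a small hypothetical RESULTS-shaped dict (standalone re-implementation for illustration only):

    import re

    sample_results = {
        0: [{'status': 'success'}, {'status': 'fail'}],
        1: [{'status': 'success'}, {'status': 'skip'}],
    }


    def count(results, key, value):
        return sum(1 for tests in results.values()
                   for test in tests
                   if key in test and re.search(value, test[key]))


    assert count(sample_results, 'status', '.*') == 4       # Run
    assert count(sample_results, 'status', 'success') == 2  # Passed
    assert count(sample_results, 'status', 'fail') == 1     # Failed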
diff --git a/tools/tempest_auto_config.py b/tools/tempest_auto_config.py
deleted file mode 100644
index 5b8d05b..0000000
--- a/tools/tempest_auto_config.py
+++ /dev/null
@@ -1,395 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# This script aims to configure an initial OpenStack environment with all the
-# necessary configurations for tempest's run using nothing but OpenStack's
-# native API.
-# That includes, creating users, tenants, registering images (cirros),
-# configuring neutron and so on.
-#
-# ASSUMPTION: this script is run by an admin user as it is meant to configure
-# the OpenStack environment prior to actual use.
-
-# Config
-import ConfigParser
-import os
-import tarfile
-import urllib2
-
-# Default client libs
-import glanceclient as glance_client
-import keystoneclient.v2_0.client as keystone_client
-
-# Import OpenStack exceptions
-import glanceclient.exc as glance_exception
-import keystoneclient.exceptions as keystone_exception
-
-
-TEMPEST_TEMP_DIR = os.getenv("TEMPEST_TEMP_DIR", "/tmp").rstrip('/')
-TEMPEST_ROOT_DIR = os.getenv("TEMPEST_ROOT_DIR", os.getenv("HOME")).rstrip('/')
-
-# Environment variables override defaults
-TEMPEST_CONFIG_DIR = os.getenv("TEMPEST_CONFIG_DIR",
- "%s%s" % (TEMPEST_ROOT_DIR, "/etc")).rstrip('/')
-TEMPEST_CONFIG_FILE = os.getenv("TEMPEST_CONFIG_FILE",
- "%s%s" % (TEMPEST_CONFIG_DIR, "/tempest.conf"))
-TEMPEST_CONFIG_SAMPLE = os.getenv("TEMPEST_CONFIG_SAMPLE",
- "%s%s" % (TEMPEST_CONFIG_DIR,
- "/tempest.conf.sample"))
-# Image references
-IMAGE_DOWNLOAD_CHUNK_SIZE = 8 * 1024
-IMAGE_UEC_SOURCE_URL = os.getenv("IMAGE_UEC_SOURCE_URL",
- "http://download.cirros-cloud.net/0.3.1/"
- "cirros-0.3.1-x86_64-uec.tar.gz")
-TEMPEST_IMAGE_ID = os.getenv('IMAGE_ID')
-TEMPEST_IMAGE_ID_ALT = os.getenv('IMAGE_ID_ALT')
-IMAGE_STATUS_ACTIVE = 'active'
-
-
-class ClientManager(object):
- """
- Manager that provides access to the official python clients for
- calling various OpenStack APIs.
- """
- def __init__(self):
- self.identity_client = None
- self.image_client = None
- self.network_client = None
- self.compute_client = None
- self.volume_client = None
-
- def get_identity_client(self, **kwargs):
- """
- Returns the openstack identity python client
- :param username: a string representing the username
- :param password: a string representing the user's password
- :param tenant_name: a string representing the tenant name of the user
- :param auth_url: a string representing the auth url of the identity
- :param insecure: True if we wish to disable ssl certificate validation,
- False otherwise
- :returns an instance of openstack identity python client
- """
- if not self.identity_client:
- self.identity_client = keystone_client.Client(**kwargs)
-
- return self.identity_client
-
- def get_image_client(self, version="1", *args, **kwargs):
- """
- This method returns OpenStack glance python client
- :param version: a string representing the version of the glance client
- to use.
- :param string endpoint: A user-supplied endpoint URL for the glance
- service.
- :param string token: Token for authentication.
- :param integer timeout: Allows customization of the timeout for client
- http requests. (optional)
- :return: a Client object representing the glance client
- """
- if not self.image_client:
- self.image_client = glance_client.Client(version, *args, **kwargs)
-
- return self.image_client
-
-
-def get_tempest_config(path_to_config):
- """
- Gets the tempest configuration file as a ConfigParser object
- :param path_to_config: path to the config file
- :return: a ConfigParser object representing the tempest configuration file
- """
- # get the sample config file from the sample
- config = ConfigParser.ConfigParser()
- config.readfp(open(path_to_config))
-
- return config
-
-
-def update_config_admin_credentials(config, config_section):
- """
- Updates the tempest config with the admin credentials
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name where the admin credentials are
- """
- # Check if credentials are present, default uses the config credentials
- OS_USERNAME = os.getenv('OS_USERNAME',
- config.get(config_section, "admin_username"))
- OS_PASSWORD = os.getenv('OS_PASSWORD',
- config.get(config_section, "admin_password"))
- OS_TENANT_NAME = os.getenv('OS_TENANT_NAME',
- config.get(config_section, "admin_tenant_name"))
- OS_AUTH_URL = os.getenv('OS_AUTH_URL', config.get(config_section, "uri"))
-
- if not (OS_AUTH_URL and
- OS_USERNAME and
- OS_PASSWORD and
- OS_TENANT_NAME):
- raise Exception("Admin environment variables not found.")
-
- # TODO(tkammer): Add support for uri_v3
- config_identity_params = {'uri': OS_AUTH_URL,
- 'admin_username': OS_USERNAME,
- 'admin_password': OS_PASSWORD,
- 'admin_tenant_name': OS_TENANT_NAME}
-
- update_config_section_with_params(config,
- config_section,
- config_identity_params)
-
-
-def update_config_section_with_params(config, config_section, params):
- """
- Updates a given config object with given params
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section we would like to update
- :param params: the parameters we wish to update for that section
- """
- for option, value in params.items():
- config.set(config_section, option, value)
-
-
-def get_identity_client_kwargs(config, config_section):
- """
- Get the required arguments for the identity python client
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name in the configuration where the
- arguments can be found
- :return: a dictionary representing the needed arguments for the identity
- client
- """
- username = config.get(config_section, 'admin_username')
- password = config.get(config_section, 'admin_password')
- tenant_name = config.get(config_section, 'admin_tenant_name')
- auth_url = config.get(config_section, 'uri')
- dscv = config.get(config_section, 'disable_ssl_certificate_validation')
- kwargs = {'username': username,
- 'password': password,
- 'tenant_name': tenant_name,
- 'auth_url': auth_url,
- 'insecure': dscv}
-
- return kwargs
-
-
-def create_user_with_tenant(identity_client, username, password, tenant_name):
- """
- Creates a user using a given identity client
- :param identity_client: openstack identity python client
- :param username: a string representing the username
- :param password: a string representing the user's password
- :param tenant_name: a string representing the tenant name of the user
- """
- # Try to create the necessary tenant
- tenant_id = None
- try:
- tenant_description = "Tenant for Tempest %s user" % username
- tenant = identity_client.tenants.create(tenant_name,
- tenant_description)
- tenant_id = tenant.id
- except keystone_exception.Conflict:
-
- # if already exist, use existing tenant
- tenant_list = identity_client.tenants.list()
- for tenant in tenant_list:
- if tenant.name == tenant_name:
- tenant_id = tenant.id
-
- # Try to create the user
- try:
- email = "%s@test.com" % username
- identity_client.users.create(name=username,
- password=password,
- email=email,
- tenant_id=tenant_id)
- except keystone_exception.Conflict:
-
- # if already exist, use existing user
- pass
-
-
-def create_users_and_tenants(identity_client,
- config,
- config_section):
- """
- Creates the two non admin users and tenants for tempest
- :param identity_client: openstack identity python client
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name of identity in the config
- """
- # Get the necessary params from the config file
- tenant_name = config.get(config_section, 'tenant_name')
- username = config.get(config_section, 'username')
- password = config.get(config_section, 'password')
-
- alt_tenant_name = config.get(config_section, 'alt_tenant_name')
- alt_username = config.get(config_section, 'alt_username')
- alt_password = config.get(config_section, 'alt_password')
-
- # Create the necessary users for the test runs
- create_user_with_tenant(identity_client, username, password, tenant_name)
- create_user_with_tenant(identity_client, alt_username, alt_password,
- alt_tenant_name)
-
-
-def get_image_client_kwargs(identity_client, config, config_section):
- """
- Get the required arguments for the image python client
- :param identity_client: openstack identity python client
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name of identity in the config
- :return: a dictionary representing the needed arguments for the image
- client
- """
-
- token = identity_client.auth_token
- endpoint = identity_client.\
- service_catalog.url_for(service_type='image', endpoint_type='publicURL'
- )
- dscv = config.get(config_section, 'disable_ssl_certificate_validation')
- kwargs = {'endpoint': endpoint,
- 'token': token,
- 'insecure': dscv}
-
- return kwargs
-
-
-def images_exist(image_client):
- """
- Checks whether the images ID's located in the environment variable are
- indeed registered
- :param image_client: the openstack python client representing the image
- client
- """
- exist = True
- if not TEMPEST_IMAGE_ID or not TEMPEST_IMAGE_ID_ALT:
- exist = False
- else:
- try:
- image_client.images.get(TEMPEST_IMAGE_ID)
- image_client.images.get(TEMPEST_IMAGE_ID_ALT)
- except glance_exception.HTTPNotFound:
- exist = False
-
- return exist
-
-
-def download_and_register_uec_images(image_client, download_url,
- download_folder):
- """
- Downloads and registered the UEC AKI/AMI/ARI images
- :param image_client:
- :param download_url: the url of the uec tar file
- :param download_folder: the destination folder we wish to save the file to
- """
- basename = os.path.basename(download_url)
- path = os.path.join(download_folder, basename)
-
- request = urllib2.urlopen(download_url)
-
- # First, download the file
- with open(path, "wb") as fp:
- while True:
- chunk = request.read(IMAGE_DOWNLOAD_CHUNK_SIZE)
- if not chunk:
- break
-
- fp.write(chunk)
-
- # Then extract and register images
- tar = tarfile.open(path, "r")
- for name in tar.getnames():
- file_obj = tar.extractfile(name)
- format = "aki"
-
- if file_obj.name.endswith(".img"):
- format = "ami"
-
- if file_obj.name.endswith("initrd"):
- format = "ari"
-
- # Register images in image client
- image_client.images.create(name=file_obj.name, disk_format=format,
- container_format=format, data=file_obj,
- is_public="true")
-
- tar.close()
-
-
-def create_images(image_client, config, config_section,
- download_url=IMAGE_UEC_SOURCE_URL,
- download_folder=TEMPEST_TEMP_DIR):
- """
- Creates images for tempest's use and sets the environment variables
- IMAGE_ID and IMAGE_ID_ALT to the IDs of the registered images
- :param image_client: OpenStack python image client
- :param config: a ConfigParser object representing the tempest config file
- :param config_section: the section name where the IMAGE ids are set
- :param download_url: the URL from which we should download the UEC tar
- :param download_folder: the place where we want to save the download file
- """
- if not images_exist(image_client):
- # Fall back to the default UEC images
- download_and_register_uec_images(image_client, download_url,
- download_folder)
- image_ids = []
- for image in image_client.images.list():
- image_ids.append(image.id)
-
- os.environ["IMAGE_ID"] = image_ids[0]
- os.environ["IMAGE_ID_ALT"] = image_ids[1]
-
- params = {'image_ref': os.getenv("IMAGE_ID"),
- 'image_ref_alt': os.getenv("IMAGE_ID_ALT")}
-
- update_config_section_with_params(config, config_section, params)
-
-
-def main():
- """
- Main module to control the script
- """
- # Check if the config file exists, or fall back to the default sample otherwise
- path_to_config = TEMPEST_CONFIG_SAMPLE
-
- if os.path.isfile(TEMPEST_CONFIG_FILE):
- path_to_config = TEMPEST_CONFIG_FILE
-
- config = get_tempest_config(path_to_config)
- update_config_admin_credentials(config, 'identity')
-
- client_manager = ClientManager()
-
- # Set the identity related info for tempest
- identity_client_kwargs = get_identity_client_kwargs(config,
- 'identity')
- identity_client = client_manager.get_identity_client(
- **identity_client_kwargs)
-
- # Create the necessary users and tenants for tempest run
- create_users_and_tenants(identity_client, config, 'identity')
-
- # Set the image related info for tempest
- image_client_kwargs = get_image_client_kwargs(identity_client,
- config,
- 'identity')
- image_client = client_manager.get_image_client(**image_client_kwargs)
-
- # Create the necessary images for the tempest run
- create_images(image_client, config, 'compute')
-
- # TODO(tkammer): add network implementation
-
-if __name__ == "__main__":
- main()
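For reference, a minimal standalone sketch of the ConfigParser write-back pattern that a helper such as update_config_section_with_params (called above) presumably relies on: set each key in the given section and rewrite the file. The function name update_section, the path, and the example params are illustrative only and not part of the change.

# Sketch only; mirrors the Python 2 era of the script above (ConfigParser module).
import ConfigParser


def update_section(path, section, params):
    # Load the existing config, ensure the section exists, set each key,
    # and write the result back to disk.
    config = ConfigParser.SafeConfigParser()
    config.read(path)
    if not config.has_section(section):
        config.add_section(section)
    for key, value in params.items():
        config.set(section, key, str(value))
    with open(path, 'w') as conf_file:
        config.write(conf_file)


# Hypothetical usage: write discovered image IDs into the [compute] section.
# update_section('/etc/tempest/tempest.conf', 'compute',
#                {'image_ref': 'abc', 'image_ref_alt': 'def'})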
diff --git a/tools/verify_tempest_config.py b/tools/verify_tempest_config.py
deleted file mode 100755
index 30785c4..0000000
--- a/tools/verify_tempest_config.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-import sys
-import urlparse
-
-import httplib2
-
-from tempest import clients
-from tempest import config
-
-
-CONF = config.CONF
-RAW_HTTP = httplib2.Http()
-
-
-def verify_glance_api_versions(os):
- # Check glance api versions
- __, versions = os.image_client.get_versions()
- if CONF.image_feature_enabled.api_v1 != ('v1.1' in versions or 'v1.0' in
- versions):
- print('Config option image api_v1 should be changed to: %s' % (
- not CONF.image_feature_enabled.api_v1))
- if CONF.image_feature_enabled.api_v2 != ('v2.0' in versions):
- print('Config option image api_v2 should be changed to: %s' % (
- not CONF.image_feature_enabled.api_v2))
-
-
-def _get_api_versions(os, service):
- client_dict = {
- 'nova': os.servers_client,
- 'keystone': os.identity_client,
- }
- client_dict[service].skip_path()
- endpoint_parts = urlparse.urlparse(client_dict[service].base_url)
- endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
- __, body = RAW_HTTP.request(endpoint, 'GET')
- client_dict[service].reset_path()
- body = json.loads(body)
- if service == 'keystone':
- versions = map(lambda x: x['id'], body['versions']['values'])
- else:
- versions = map(lambda x: x['id'], body['versions'])
- return versions
-
-
-def verify_keystone_api_versions(os):
- # Check keystone api versions
- versions = _get_api_versions(os, 'keystone')
- if CONF.identity_feature_enabled.api_v2 != ('v2.0' in versions):
- print('Config option identity api_v2 should be changed to %s' % (
- not CONF.identity_feature_enabled.api_v2))
- if CONF.identity_feature_enabled.api_v3 != ('v3.0' in versions):
- print('Config option identity api_v3 should be changed to %s' % (
- not CONF.identity_feature_enabled.api_v3))
-
-
-def verify_nova_api_versions(os):
- versions = _get_api_versions(os, 'nova')
- if CONF.compute_feature_enabled.api_v3 != ('v3.0' in versions):
- print('Config option compute api_v3 should be changed to: %s' % (
- not CONF.compute_feature_enabled.api_v3))
-
-
-def get_extension_client(os, service):
- extensions_client = {
- 'nova': os.extensions_client,
- 'nova_v3': os.extensions_v3_client,
- 'cinder': os.volumes_extension_client,
- 'neutron': os.network_client,
- 'swift': os.account_client,
- }
- if service not in extensions_client:
- print('No tempest extensions client for %s' % service)
- exit(1)
- return extensions_client[service]
-
-
-def get_enabled_extensions(service):
- extensions_options = {
- 'nova': CONF.compute_feature_enabled.api_extensions,
- 'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
- 'cinder': CONF.volume_feature_enabled.api_extensions,
- 'neutron': CONF.network_feature_enabled.api_extensions,
- 'swift': CONF.object_storage_feature_enabled.discoverable_apis,
- }
- if service not in extensions_options:
- print('No supported extensions list option for %s' % service)
- exit(1)
- return extensions_options[service]
-
-
-def verify_extensions(os, service, results):
- extensions_client = get_extension_client(os, service)
- __, resp = extensions_client.list_extensions()
- if isinstance(resp, dict):
- # Neutron's extension 'name' field is not a single word (it has
- # spaces in the string). Since that can't be used as a list option, the
- # api_extensions option in the network-feature-enabled group uses the
- # alias instead of the name.
- if service == 'neutron':
- extensions = map(lambda x: x['alias'], resp['extensions'])
- elif service == 'swift':
- # Remove Swift general information from extensions list
- resp.pop('swift')
- extensions = resp.keys()
- else:
- extensions = map(lambda x: x['name'], resp['extensions'])
-
- else:
- extensions = map(lambda x: x['name'], resp)
- if not results.get(service):
- results[service] = {}
- extensions_opt = get_enabled_extensions(service)
- if extensions_opt[0] == 'all':
- results[service]['extensions'] = 'all'
- return results
- # Verify that all configured extensions are actually enabled
- for extension in extensions_opt:
- results[service][extension] = extension in extensions
- # Verify that there aren't additional extensions enabled that aren't
- # specified in the config list
- for extension in extensions:
- if extension not in extensions_opt:
- results[service][extension] = False
- return results
-
-
-def display_results(results):
- for service in results:
- # If all extensions are specified as being enabled, there is no way to
- # verify this, so we just assume it to be true
- if results[service].get('extensions'):
- continue
- extension_list = get_enabled_extensions(service)
- for extension in results[service]:
- if not results[service][extension]:
- if extension in extension_list:
- print("%s extension: %s should not be included in the list"
- " of enabled extensions" % (service, extension))
- else:
- print("%s extension: %s should be included in the list of "
- "enabled extensions" % (service, extension))
-
-
-def check_service_availability(os):
- services = []
- avail_services = []
- codename_match = {
- 'volume': 'cinder',
- 'network': 'neutron',
- 'image': 'glance',
- 'object_storage': 'swift',
- 'compute': 'nova',
- 'orchestration': 'heat',
- 'metering': 'ceilometer',
- 'telemetry': 'ceilometer',
- 'data_processing': 'savanna',
- 'baremetal': 'ironic',
- 'identity': 'keystone'
-
- }
- # Get catalog list for endpoints to use for validation
- __, endpoints = os.endpoints_client.list_endpoints()
- for endpoint in endpoints:
- __, service = os.service_client.get_service(endpoint['service_id'])
- services.append(service['type'])
- # Pull all catalog types from config file and compare against endpoint list
- for cfgname in dir(CONF._config):
- cfg = getattr(CONF, cfgname)
- catalog_type = getattr(cfg, 'catalog_type', None)
- if not catalog_type:
- continue
- else:
- if cfgname == 'identity':
- # Keystone is a required service for tempest
- continue
- if catalog_type not in services:
- if getattr(CONF.service_available, codename_match[cfgname]):
- print('Endpoint type %s not found; either disable service '
- '%s or fix the catalog_type in the config file' % (
- catalog_type, codename_match[cfgname]))
- else:
- if not getattr(CONF.service_available,
- codename_match[cfgname]):
- print('Endpoint type %s is available, service %s should be'
- ' set as available in the config file.' % (
- catalog_type, codename_match[cfgname]))
- else:
- avail_services.append(codename_match[cfgname])
- return avail_services
-
-
-def main(argv):
- print('Running config verification...')
- os = clients.ComputeAdminManager(interface='json')
- services = check_service_availability(os)
- results = {}
- for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
- if service == 'nova_v3' and 'nova' not in services:
- continue
- elif service not in services:
- continue
- results = verify_extensions(os, service, results)
- verify_keystone_api_versions(os)
- verify_glance_api_versions(os)
- verify_nova_api_versions(os)
- display_results(results)
-
-
-if __name__ == "__main__":
- main(sys.argv)
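As a standalone illustration of the version discovery that _get_api_versions performs above, the sketch below strips the versioned path from a service URL, issues a GET against the unversioned root, and returns the advertised version IDs. The function name list_api_versions and the example URL are hypothetical; the keystone/nova response shapes follow the handling in the deleted code.

# Sketch only; uses the same libraries as the script above (urlparse, httplib2, json).
import json
import urlparse

import httplib2


def list_api_versions(base_url):
    # Drop the versioned path, keeping only scheme://host:port
    parts = urlparse.urlparse(base_url)
    unversioned = parts.scheme + '://' + parts.netloc
    __, body = httplib2.Http().request(unversioned, 'GET')
    body = json.loads(body)
    versions = body['versions']
    # Keystone nests the version list one level deeper than nova does
    if isinstance(versions, dict):
        versions = versions['values']
    return [version['id'] for version in versions]


# Hypothetical usage:
# print(list_api_versions('http://keystone.example.com:5000/v2.0'))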
diff --git a/tox.ini b/tox.ini
index 5e8d283..6b4acc6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -77,7 +77,7 @@
[testenv:stress]
sitepackages = True
commands =
- python -m tempest/stress/run_stress -a -d 3600 -S
+ run-tempest-stress -a -d 3600 -S
[testenv:venv]
commands = {posargs}
@@ -95,9 +95,10 @@
[hacking]
local-check-factory = tempest.hacking.checks.factory
+import_exceptions = tempest.services
[flake8]
# E125 is a won't fix until https://github.com/jcrocholl/pep8/issues/126 is resolved. For further detail see https://review.openstack.org/#/c/36788/
-ignore = E125,H302,H404
+ignore = E125,H404
show-source = True
exclude = .git,.venv,.tox,dist,doc,openstack,*egg
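The tox.ini hunk above swaps the `python -m tempest/stress/run_stress` invocation for the `run-tempest-stress` console script. A console script like that would typically be declared through a setup.cfg entry point along the following lines; the exact module path shown here is an assumption, not taken from this change.

[entry_points]
console_scripts =
    run-tempest-stress = tempest.cmd.run_stress:main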