Merge "Cleanup: Add common "ssh-login server" method"
diff --git a/requirements.txt b/requirements.txt
index cc61b01..06db0e6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,6 +13,7 @@
python-novaclient>=2.10.0
python-neutronclient>=2.2.3,<3.0.0
python-cinderclient>=1.0.4
+python-heatclient>=0.2.3
testresources
keyring
testrepository
diff --git a/tempest/api/compute/admin/test_fixed_ips.py b/tempest/api/compute/admin/test_fixed_ips.py
index 8b96370..895f773 100644
--- a/tempest/api/compute/admin/test_fixed_ips.py
+++ b/tempest/api/compute/admin/test_fixed_ips.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import testtools
-
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
@@ -30,6 +28,9 @@
@classmethod
def setUpClass(cls):
super(FixedIPsBase, cls).setUpClass()
+ if cls.config.service_available.neutron:
+ msg = ("%s skipped as neutron is available" % cls.__name__)
+ raise cls.skipException(msg)
# NOTE(maurosr): The idea here is: the server creation is just an
# auxiliary element to the ip details or reservation, there was no way
# (at least none in my mind) to get a valid and existing ip except
@@ -56,8 +57,6 @@
CONF = config.TempestConfig()
- @testtools.skipIf(CONF.service_available.neutron, "This feature is not" +
- "implemented by Neutron. See bug: #1194569")
@attr(type='gate')
def test_list_fixed_ip_details(self):
resp, fixed_ip = self.client.get_fixed_ip_details(self.ip)
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index 69e15f7..5f31084 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -288,7 +288,7 @@
r, flavors = self.client.list_flavors_with_detail(params)
self.assertEqual(r.status, 200)
flavor = _flavor_lookup(flavors, flavor_name)
- self.assertNotEqual(flavor, None)
+ self.assertIsNotNone(flavor)
_test_string_variations(['f', 'false', 'no', '0'],
flavor_name_not_public)
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 893d9e0..25df6e6 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -225,7 +225,7 @@
resp, output = self.servers_client.get_console_output(
self.server_id, 10)
self.assertEqual(200, resp.status)
- self.assertNotEqual(output, None)
+ self.assertIsNotNone(output)
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
self.wait_for(get_output)
@@ -249,7 +249,7 @@
resp, output = self.servers_client.get_console_output(self.server_id,
10)
self.assertEqual(200, resp.status)
- self.assertNotEqual(output, None)
+ self.assertIsNotNone(output)
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 2a5be8c..e5ea30e 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -45,7 +45,7 @@
# for a given server_id
resp, output = self.client.list_virtual_interfaces(self.server_id)
self.assertEqual(200, resp.status)
- self.assertNotEqual(output, None)
+ self.assertIsNotNone(output)
virt_ifaces = output
self.assertNotEqual(0, len(virt_ifaces['virtual_interfaces']),
'Expected virtual interfaces, got 0 interfaces.')
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index bc050dc..65fe1ac 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -75,22 +75,27 @@
cls.data.test_password,
cls.data.test_tenant)
- headers = {"X-Auth-Token": cls.reselleradmin_token,
+ def setUp(self):
+ super(AccountQuotasTest, self).setUp()
+
+ # Set a quota of 20 bytes on the user's account before each test
+ headers = {"X-Auth-Token": self.reselleradmin_token,
"X-Account-Meta-Quota-Bytes": "20"}
- cls.os.custom_account_client.request("POST", "", headers, "")
+ self.os.custom_account_client.request("POST", "", headers, "")
+
+ def tearDown(self):
+ # remove the quota from the account
+ headers = {"X-Auth-Token": self.reselleradmin_token,
+ "X-Remove-Account-Meta-Quota-Bytes": "x"}
+
+ self.os.custom_account_client.request("POST", "", headers, "")
+ super(AccountQuotasTest, self).tearDown()
@classmethod
def tearDownClass(cls):
cls.delete_containers([cls.container_name])
cls.data.teardown_all()
-
- # remove the quota from the container
- headers = {"X-Auth-Token": cls.reselleradmin_token,
- "X-Remove-Account-Meta-Quota-Bytes": "x"}
-
- cls.os.custom_account_client.request("POST", "", headers, "")
-
super(AccountQuotasTest, cls).tearDownClass()
@testtools.skipIf(not accounts_quotas_available,
@@ -113,3 +118,45 @@
self.assertRaises(exceptions.OverLimit,
self.object_client.create_object,
self.container_name, object_name, data)
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type=["smoke"])
+ def test_admin_modify_quota(self):
+ """Test that the ResellerAdmin is able to modify and remove the quota
+ on a user's account.
+
+ Using the custom_account client, the test modifies the quota
+ successively to:
+
+ * "25": a random value different from the initial quota value.
+ * "" : an empty value, equivalent to the removal of the quota.
+ * "20": set the quota to its initial value.
+ """
+ for quota in ("25", "", "20"):
+
+ headers = {"X-Auth-Token": self.reselleradmin_token,
+ "X-Account-Meta-Quota-Bytes": quota}
+
+ resp, _ = self.os.custom_account_client.request("POST", "",
+ headers, "")
+
+ self.assertEqual(resp["status"], "204")
+
+ @testtools.skipIf(not accounts_quotas_available,
+ "Account Quotas middleware not available")
+ @attr(type=["negative", "smoke"])
+ def test_user_modify_quota(self):
+ """Test that a user is not able to modify or remove a quota on
+ its account.
+ """
+
+ # Not able to remove quota
+ self.assertRaises(exceptions.Unauthorized,
+ self.account_client.create_account_metadata,
+ {"Quota-Bytes": ""})
+
+ # Not able to modify quota
+ self.assertRaises(exceptions.Unauthorized,
+ self.account_client.create_account_metadata,
+ {"Quota-Bytes": "100"})
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index b34d516..08f585a 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -134,6 +134,11 @@
for field in field_names:
self.assertIn(field, item)
+ def assertFirstLineStartsWith(self, lines, beginning):
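+ # Include the first few lines of the output in the failure message
+ # to make mismatches easier to diagnose.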
+ self.assertTrue(lines[0].startswith(beginning),
+ msg=('Beginning of first line has invalid content: %s'
+ % lines[:3]))
+
class CommandFailed(subprocess.CalledProcessError):
# adds output attribute for python2.6
diff --git a/tempest/cli/simple_read_only/test_cinder.py b/tempest/cli/simple_read_only/test_cinder.py
index e9ce87b..21acae8 100644
--- a/tempest/cli/simple_read_only/test_cinder.py
+++ b/tempest/cli/simple_read_only/test_cinder.py
@@ -87,7 +87,7 @@
def test_admin_help(self):
help_text = self.cinder('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: cinder'))
+ self.assertFirstLineStartsWith(lines, 'usage: cinder')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_glance.py b/tempest/cli/simple_read_only/test_glance.py
index 3d58451..d02c60b 100644
--- a/tempest/cli/simple_read_only/test_glance.py
+++ b/tempest/cli/simple_read_only/test_glance.py
@@ -48,7 +48,7 @@
def test_glance_help(self):
help_text = self.glance('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: glance'))
+ self.assertFirstLineStartsWith(lines, 'usage: glance')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_keystone.py b/tempest/cli/simple_read_only/test_keystone.py
index 4c7982b..1e8009f 100644
--- a/tempest/cli/simple_read_only/test_keystone.py
+++ b/tempest/cli/simple_read_only/test_keystone.py
@@ -46,7 +46,9 @@
out = self.keystone('catalog')
catalog = self.parser.details_multiple(out, with_label=True)
for svc in catalog:
- self.assertTrue(svc['__label'].startswith('Service:'))
+ self.assertTrue(svc['__label'].startswith('Service:'),
+ msg=('Invalid beginning of service block: %s' %
+ svc['__label']))
def test_admin_endpoint_list(self):
out = self.keystone('endpoint-list')
@@ -94,7 +96,7 @@
def test_admin_help(self):
help_text = self.keystone('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: keystone'))
+ self.assertFirstLineStartsWith(lines, 'usage: keystone')
commands = []
cmds_start = lines.index('Positional arguments:')
diff --git a/tempest/cli/simple_read_only/test_neutron.py b/tempest/cli/simple_read_only/test_neutron.py
index 7b8340d..ae3a1a7 100644
--- a/tempest/cli/simple_read_only/test_neutron.py
+++ b/tempest/cli/simple_read_only/test_neutron.py
@@ -92,7 +92,7 @@
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
- self.assertTrue(lines[0].startswith('usage: neutron'))
+ self.assertFirstLineStartsWith(lines, 'usage: neutron')
commands = []
cmds_start = lines.index('Commands for API v2.0:')
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index f853849..8290021 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,11 +16,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import subprocess
# Default client libs
import cinderclient.client
import glanceclient
+import heatclient.client
import keystoneclient.v2_0.client
import netaddr
from neutronclient.common import exceptions as exc
@@ -49,6 +51,7 @@
NOVACLIENT_VERSION = '2'
CINDERCLIENT_VERSION = '1'
+ HEATCLIENT_VERSION = '1'
def __init__(self, username, password, tenant_name):
super(OfficialClientManager, self).__init__()
@@ -63,6 +66,10 @@
self.volume_client = self._get_volume_client(username,
password,
tenant_name)
+ self.orchestration_client = self._get_orchestration_client(
+ username,
+ password,
+ tenant_name)
def _get_compute_client(self, username, password, tenant_name):
# Novaclient will not execute operations for anyone but the
@@ -99,6 +106,32 @@
tenant_name,
auth_url)
+ def _get_orchestration_client(self, username=None, password=None,
+ tenant_name=None):
+ if not username:
+ username = self.config.identity.admin_username
+ if not password:
+ password = self.config.identity.admin_password
+ if not tenant_name:
+ tenant_name = self.config.identity.tenant_name
+
+ self._validate_credentials(username, password, tenant_name)
+
+ keystone = self._get_identity_client(username, password, tenant_name)
+ token = keystone.auth_token
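+ # Heat may not be deployed; if the catalog has no orchestration
+ # endpoint, return None so callers can skip orchestration tests.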
+ try:
+ endpoint = keystone.service_catalog.url_for(
+ service_type='orchestration',
+ endpoint_type='publicURL')
+ except keystoneclient.exceptions.EndpointNotFound:
+ return None
+ else:
+ return heatclient.client.Client(self.HEATCLIENT_VERSION,
+ endpoint,
+ token=token,
+ username=username,
+ password=password)
+
def _get_identity_client(self, username, password, tenant_name):
# This identity client is not intended to check the security
# of the identity service, so use admin credentials by default.
@@ -154,13 +187,8 @@
super(OfficialClientTest, cls).setUpClass()
cls.isolated_creds = isolated_creds.IsolatedCreds(
__name__, tempest_client=False)
- if cls.config.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_primary_creds()
- username, tenant_name, password = creds
- else:
- username = cls.config.identity.username
- password = cls.config.identity.password
- tenant_name = cls.config.identity.tenant_name
+
+ username, tenant_name, password = cls.credentials()
cls.manager = OfficialClientManager(username, password, tenant_name)
cls.compute_client = cls.manager.compute_client
@@ -168,10 +196,21 @@
cls.identity_client = cls.manager.identity_client
cls.network_client = cls.manager.network_client
cls.volume_client = cls.manager.volume_client
+ cls.orchestration_client = cls.manager.orchestration_client
cls.resource_keys = {}
cls.os_resources = []
@classmethod
+ def credentials(cls):
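+ # Use isolated per-run credentials when tenant isolation is
+ # enabled; otherwise fall back to the configured identity user.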
+ if cls.config.compute.allow_tenant_isolation:
+ return cls.isolated_creds.get_primary_creds()
+
+ username = cls.config.identity.username
+ password = cls.config.identity.password
+ tenant_name = cls.config.identity.tenant_name
+ return username, tenant_name, password
+
+ @classmethod
def tearDownClass(cls):
# NOTE(jaypipes): Because scenario tests are typically run in a
# specific order, and because test methods in scenario tests
@@ -527,3 +566,30 @@
timeout=self.config.compute.ssh_timeout),
'Auth failure in connecting to %s@%s via ssh' %
(username, ip_address))
+
+
+class OrchestrationScenarioTest(OfficialClientTest):
+ """
+ Base class for orchestration scenario tests
+ """
+
+ @classmethod
+ def credentials(cls):
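+ # Run with admin credentials: the autoscaling template creates
+ # IAM-style resources (e.g. AWS::IAM::User), which regular tenants
+ # typically cannot create.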
+ username = cls.config.identity.admin_username
+ password = cls.config.identity.admin_password
+ tenant_name = cls.config.identity.tenant_name
+ return username, tenant_name, password
+
+ def _load_template(self, base_file, file_name):
+ filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
+ file_name)
+ with open(filepath) as f:
+ return f.read()
+
+ @classmethod
+ def _stack_rand_name(cls):
+ return rand_name(cls.__name__ + '-')
+
+ def _create_keypair(self):
+ kp_name = rand_name('keypair-smoke')
+ return self.compute_client.keypairs.create(kp_name)
diff --git a/tempest/scenario/orchestration/__init__.py b/tempest/scenario/orchestration/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/scenario/orchestration/__init__.py
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
new file mode 100644
index 0000000..cd959a8
--- /dev/null
+++ b/tempest/scenario/orchestration/test_autoscaling.py
@@ -0,0 +1,108 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.openstack.common import log as logging
+from tempest.scenario import manager
+from tempest.test import attr
+from tempest.test import call_until_true
+import time
+
+
+LOG = logging.getLogger(__name__)
+
+
+class AutoScalingTest(manager.OrchestrationScenarioTest):
+
+ def setUp(self):
+ super(AutoScalingTest, self).setUp()
+ if not self.config.orchestration.image_ref:
+ raise self.skipException("No image available to test")
+ self.client = self.orchestration_client
+
+ def assign_keypair(self):
+ self.stack_name = self._stack_rand_name()
+ if self.config.orchestration.keypair_name:
+ self.keypair_name = self.config.orchestration.keypair_name
+ else:
+ self.keypair = self._create_keypair()
+ self.keypair_name = self.keypair.id
+ self.set_resource('keypair', self.keypair)
+
+ def launch_stack(self):
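+ # StackStart is passed in so the template can compute an absolute
+ # deadline for releasing the memory consumed during scale-up.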
+ self.parameters = {
+ 'KeyName': self.keypair_name,
+ 'InstanceType': self.config.orchestration.instance_type,
+ 'ImageId': self.config.orchestration.image_ref,
+ 'StackStart': str(time.time())
+ }
+
+ # create the stack
+ self.template = self._load_template(__file__, 'test_autoscaling.yaml')
+ self.client.stacks.create(
+ stack_name=self.stack_name,
+ template=self.template,
+ parameters=self.parameters)
+
+ self.stack = self.client.stacks.get(self.stack_name)
+ self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
+
+ # if a keypair was set, do not delete the stack on exit to allow
+ # for manual post-mortems
+ if not self.config.orchestration.keypair_name:
+ self.set_resource('stack', self.stack)
+
+ @attr(type='slow')
+ def test_scale_up_then_down(self):
+
+ self.assign_keypair()
+ self.launch_stack()
+
+ sid = self.stack_identifier
+ timeout = self.config.orchestration.build_timeout
+ interval = 10
+
+ self.assertEqual('CREATE', self.stack.action)
+ # wait for create to complete.
+ self.status_timeout(self.client.stacks, sid, 'COMPLETE')
+
+ self.stack.get()
+ self.assertEqual('CREATE_COMPLETE', self.stack.stack_status)
+
+ # the resource SmokeServerGroup is implemented as a nested
+ # stack, so servers can be counted by counting the resources
+ # inside that nested stack
+ resource = self.client.resources.get(sid, 'SmokeServerGroup')
+ nested_stack_id = resource.physical_resource_id
+
+ def server_count():
+ # the number of servers is the number of resources
+ # in the nested stack
+ self.server_count = len(
+ self.client.resources.list(nested_stack_id))
+ return self.server_count
+
+ def assertScale(from_servers, to_servers):
+ call_until_true(lambda: server_count() == to_servers,
+ timeout, interval)
+ self.assertEqual(to_servers, self.server_count,
+ 'Failed scaling from %d to %d servers' % (
+ from_servers, to_servers))
+
+ # he marched them up to the top of the hill
+ assertScale(1, 2)
+ assertScale(2, 3)
+
+ # and he marched them down again
+ assertScale(3, 2)
+ assertScale(2, 1)
diff --git a/tempest/scenario/orchestration/test_autoscaling.yaml b/tempest/scenario/orchestration/test_autoscaling.yaml
new file mode 100644
index 0000000..045b3bc
--- /dev/null
+++ b/tempest/scenario/orchestration/test_autoscaling.yaml
@@ -0,0 +1,182 @@
+HeatTemplateFormatVersion: '2012-12-12'
+Description: |
+ Template which tests autoscaling and load balancing
+Parameters:
+ KeyName:
+ Type: String
+ InstanceType:
+ Type: String
+ ImageId:
+ Type: String
+ StackStart:
+ Description: Epoch seconds when the stack was launched
+ Type: Number
+ ConsumeStartSeconds:
+ Description: Seconds after invocation when memory should be consumed
+ Type: Number
+ Default: '60'
+ ConsumeStopSeconds:
+ Description: Seconds after StackStart when memory should be released
+ Type: Number
+ Default: '420'
+ ScaleUpThreshold:
+ Description: Memory percentage threshold to scale up on
+ Type: Number
+ Default: '70'
+ ScaleDownThreshold:
+ Description: Memory percentage threshold to scale down on
+ Type: Number
+ Default: '60'
+ ConsumeMemoryLimit:
+ Description: Memory percentage threshold to consume
+ Type: Number
+ Default: '71'
+Resources:
+ SmokeServerGroup:
+ Type: AWS::AutoScaling::AutoScalingGroup
+ Properties:
+ AvailabilityZones: {'Fn::GetAZs': ''}
+ LaunchConfigurationName: {Ref: LaunchConfig}
+ MinSize: '1'
+ MaxSize: '3'
+ SmokeServerScaleUpPolicy:
+ Type: AWS::AutoScaling::ScalingPolicy
+ Properties:
+ AdjustmentType: ChangeInCapacity
+ AutoScalingGroupName: {Ref: SmokeServerGroup}
+ Cooldown: '60'
+ ScalingAdjustment: '1'
+ SmokeServerScaleDownPolicy:
+ Type: AWS::AutoScaling::ScalingPolicy
+ Properties:
+ AdjustmentType: ChangeInCapacity
+ AutoScalingGroupName: {Ref: SmokeServerGroup}
+ Cooldown: '60'
+ ScalingAdjustment: '-1'
+ MEMAlarmHigh:
+ Type: AWS::CloudWatch::Alarm
+ Properties:
+ AlarmDescription: Scale-up if MEM > ScaleUpThreshold% for 10 seconds
+ MetricName: MemoryUtilization
+ Namespace: system/linux
+ Statistic: Average
+ Period: '10'
+ EvaluationPeriods: '1'
+ Threshold: {Ref: ScaleUpThreshold}
+ AlarmActions: [{Ref: SmokeServerScaleUpPolicy}]
+ Dimensions:
+ - Name: AutoScalingGroupName
+ Value: {Ref: SmokeServerGroup}
+ ComparisonOperator: GreaterThanThreshold
+ MEMAlarmLow:
+ Type: AWS::CloudWatch::Alarm
+ Properties:
+ AlarmDescription: Scale-down if MEM < ScaleDownThreshold% for 10 seconds
+ MetricName: MemoryUtilization
+ Namespace: system/linux
+ Statistic: Average
+ Period: '10'
+ EvaluationPeriods: '1'
+ Threshold: {Ref: ScaleDownThreshold}
+ AlarmActions: [{Ref: SmokeServerScaleDownPolicy}]
+ Dimensions:
+ - Name: AutoScalingGroupName
+ Value: {Ref: SmokeServerGroup}
+ ComparisonOperator: LessThanThreshold
+ CfnUser:
+ Type: AWS::IAM::User
+ SmokeKeys:
+ Type: AWS::IAM::AccessKey
+ Properties:
+ UserName: {Ref: CfnUser}
+ SmokeSecurityGroup:
+ Type: AWS::EC2::SecurityGroup
+ Properties:
+ GroupDescription: Standard firewall rules
+ SecurityGroupIngress:
+ - {IpProtocol: tcp, FromPort: '22', ToPort: '22', CidrIp: 0.0.0.0/0}
+ - {IpProtocol: tcp, FromPort: '80', ToPort: '80', CidrIp: 0.0.0.0/0}
+ LaunchConfig:
+ Type: AWS::AutoScaling::LaunchConfiguration
+ Metadata:
+ AWS::CloudFormation::Init:
+ config:
+ files:
+ /etc/cfn/cfn-credentials:
+ content:
+ Fn::Replace:
+ - $AWSAccessKeyId: {Ref: SmokeKeys}
+ $AWSSecretKey: {'Fn::GetAtt': [SmokeKeys, SecretAccessKey]}
+ - |
+ AWSAccessKeyId=$AWSAccessKeyId
+ AWSSecretKey=$AWSSecretKey
+ mode: '000400'
+ owner: root
+ group: root
+ /root/watch_loop:
+ content:
+ Fn::Replace:
+ - _hi_: {Ref: MEMAlarmHigh}
+ _lo_: {Ref: MEMAlarmLow}
+ - |
+ #!/bin/bash
+ while :
+ do
+ /opt/aws/bin/cfn-push-stats --watch _hi_ --mem-util
+ /opt/aws/bin/cfn-push-stats --watch _lo_ --mem-util
+ sleep 4
+ done
+ mode: '000700'
+ owner: root
+ group: root
+ /root/consume_memory:
+ content:
+ Fn::Replace:
+ - StackStart: {Ref: StackStart}
+ ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
+ ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
+ ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
+ - |
+ #!/usr/bin/env python
+ import psutil
+ import time
+ import datetime
+ import sys
+ a = []
+ sleep_until_consume = ConsumeStartSeconds
+ stack_start = StackStart
+ consume_stop_time = stack_start + ConsumeStopSeconds
+ memory_limit = ConsumeMemoryLimit
+ if sleep_until_consume > 0:
+ sys.stdout.flush()
+ time.sleep(sleep_until_consume)
+ while psutil.virtual_memory().percent < memory_limit:
+ sys.stdout.flush()
+ a.append(' ' * 10**5)
+ time.sleep(0.1)
+ sleep_until_exit = consume_stop_time - time.time()
+ if sleep_until_exit > 0:
+ time.sleep(sleep_until_exit)
+ mode: '000700'
+ owner: root
+ group: root
+ Properties:
+ ImageId: {Ref: ImageId}
+ InstanceType: {Ref: InstanceType}
+ KeyName: {Ref: KeyName}
+ SecurityGroups: [{Ref: SmokeSecurityGroup}]
+ UserData:
+ Fn::Base64:
+ Fn::Replace:
+ - ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
+ ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
+ ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
+ - |
+ #!/bin/bash -v
+ /opt/aws/bin/cfn-init
+ # report on memory consumption every 4 seconds
+ /root/watch_loop &
+ # wait ConsumeStartSeconds then ramp up memory consumption
+ # until it is over ConsumeMemoryLimit%
+ # then exits ConsumeStopSeconds seconds after stack launch
+ /root/consume_memory > /root/consume_memory.log &
\ No newline at end of file
diff --git a/tempest/services/compute/xml/flavors_client.py b/tempest/services/compute/xml/flavors_client.py
index 3a8986c..6fbb9e3 100644
--- a/tempest/services/compute/xml/flavors_client.py
+++ b/tempest/services/compute/xml/flavors_client.py
@@ -30,8 +30,6 @@
"http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
XMLNS_OS_FLV_ACCESS = \
"http://docs.openstack.org/compute/ext/flavor_access/api/v1.1"
-XMLNS_OS_FLV_WITH_EXT_SPECS = \
- "http://docs.openstack.org/compute/ext/flavor_with_extra_specs/api/v2.0"
class FlavorsClientXML(RestClientXML):
@@ -51,7 +49,7 @@
if k == '{%s}ephemeral' % XMLNS_OS_FLV_EXT_DATA:
k = 'OS-FLV-EXT-DATA:ephemeral'
- if k == '{%s}extra_specs' % XMLNS_OS_FLV_WITH_EXT_SPECS:
+ if k == 'extra_specs':
k = 'OS-FLV-WITH-EXT-SPECS:extra_specs'
flavor[k] = dict(v)
continue
diff --git a/tempest/stress/actions/unit_test.py b/tempest/stress/actions/unit_test.py
new file mode 100644
index 0000000..95cc1bc
--- /dev/null
+++ b/tempest/stress/actions/unit_test.py
@@ -0,0 +1,79 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.openstack.common import importutils
+import tempest.stress.stressaction as stressaction
+
+
+class SetUpClassRunTime(object):
+
+ process = 'process'
+ action = 'action'
+ application = 'application'
+
+ allowed = set((process, action, application))
+
+ @classmethod
+ def validate(cls, name):
+ if name not in cls.allowed:
+ raise KeyError("'%s' is not a valid option" % name)
+
+
+class UnitTest(stressaction.StressAction):
+ """This is a special action for running existing unittests as stress test.
+ You need to pass ``test_method`` and ``class_setup_per``
+ using ``kwargs`` in the JSON descriptor;
+ ``test_method`` should be the fully qualified name of a unittest,
+ ``class_setup_per`` should be one from:
+ ``application``: once in the stress job lifetime
+ ``process``: once in the worker process lifetime
+ ``action``: on each action
+ Not all combination working in every case.
+ """
+
+ def setUp(self, **kwargs):
+ method = kwargs['test_method'].split('.')
+ self.test_method = method.pop()
+ self.klass = importutils.import_class('.'.join(method))
+ # valid options are 'process', 'application', 'action'
+ self.class_setup_per = kwargs.get('class_setup_per',
+ SetUpClassRunTime.process)
+ SetUpClassRunTime.validate(self.class_setup_per)
+
+ if self.class_setup_per == SetUpClassRunTime.application:
+ self.klass.setUpClass()
+ self.setupclass_called = False
+
+ def run_core(self):
+ res = self.klass(self.test_method).run()
+ if res.errors:
+ raise RuntimeError(res.errors)
+
+ def run(self):
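+ # 'application' setup already happened in setUp(); 'process' calls
+ # setUpClass once per worker, while 'action' wraps each run in
+ # setUpClass/tearDownClass.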
+ if self.class_setup_per != SetUpClassRunTime.application:
+ if (self.class_setup_per == SetUpClassRunTime.action
+ or self.setupclass_called is False):
+ self.klass.setUpClass()
+ self.setupclass_called = True
+
+ self.run_core()
+
+ if self.class_setup_per == SetUpClassRunTime.action:
+ self.klass.tearDownClass()
+ else:
+ self.run_core()
+
+ def tearDown(self):
+ if self.class_setup_per != SetUpClassRunTime.action:
+ self.klass.tearDownClass()
diff --git a/tempest/stress/etc/sample-unit-test.json b/tempest/stress/etc/sample-unit-test.json
new file mode 100644
index 0000000..b388bfe
--- /dev/null
+++ b/tempest/stress/etc/sample-unit-test.json
@@ -0,0 +1,8 @@
+[{"action": "tempest.stress.actions.unit_test.UnitTest",
+ "threads": 8,
+ "use_admin": false,
+ "use_isolated_tenants": false,
+ "kwargs": {"test_method": "tempest.cli.simple_read_only.test_glance.SimpleReadOnlyGlanceClientTest.test_glance_fake_action",
+ "class_setup_per": "process"}
+ }
+]
diff --git a/tempest/test.py b/tempest/test.py
index 7787790..68cedf0 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -97,7 +97,7 @@
def validate_tearDownClass():
if at_exit_set:
- raise RuntimeError("tearDownClass does not calls the super's"
+ raise RuntimeError("tearDownClass does not calls the super's "
"tearDownClass in these classes: "
+ str(at_exit_set))
diff --git a/tools/skip_tracker.py b/tools/skip_tracker.py
index 1ed6961..c244808 100755
--- a/tools/skip_tracker.py
+++ b/tools/skip_tracker.py
@@ -61,7 +61,7 @@
"""
Return the skip tuples in a test file
"""
- BUG_RE = re.compile(r'.*skip\(.*bug:*\s*\#*(\d+)', re.IGNORECASE)
+ BUG_RE = re.compile(r'.*skip.*bug:*\s*\#*(\d+)', re.IGNORECASE)
DEF_RE = re.compile(r'.*def (\w+)\(')
bug_found = False
results = []
diff --git a/tox.ini b/tox.ini
index 471fecb..ea27b92 100644
--- a/tox.ini
+++ b/tox.ini
@@ -27,6 +27,13 @@
commands =
sh tools/pretty_tox.sh '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario|thirdparty|cli)) {posargs}'
+[testenv:heat-slow]
+sitepackages = True
+setenv = VIRTUAL_ENV={envdir}
+# The regex below is used to select heat api/scenario tests tagged as slow.
+commands =
+ sh tools/pretty_tox_serial.sh '(?=.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)\.orchestration) {posargs}'
+
[testenv:py26-full]
sitepackages = True
setenv = VIRTUAL_ENV={envdir}