# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import nose
from nose.plugins.attrib import attr
import unittest2 as unittest
from tempest.testboto import BotoTestCase
from tempest.tests.boto.utils.s3 import s3_upload_dir
import tempest.tests.boto
from tempest.common.utils.data_utils import rand_name
from tempest.exceptions import EC2RegisterImageException
from tempest.tests.boto.utils.wait import state_wait, re_search_wait
from tempest import openstack
from tempest.common.utils.linux.remote_client import RemoteClient
from boto.s3.key import Key
from contextlib import closing
import logging


LOG = logging.getLogger(__name__)


@attr("S3", "EC2")
class InstanceRunTest(BotoTestCase):

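    # setUpClass uploads the ami/aki/ari bundle material to a fresh S3
    # bucket, registers the three images and waits until each of them
    # reaches the "available" state before any test runs.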
    @classmethod
    def setUpClass(cls):
        super(InstanceRunTest, cls).setUpClass()
        if not tempest.tests.boto.A_I_IMAGES_READY:
            raise nose.SkipTest("".join(("EC2 ", cls.__name__,
                                         ": requires ami/aki/ari manifest")))
        cls.os = openstack.Manager()
        cls.s3_client = cls.os.s3_client
        cls.ec2_client = cls.os.ec2api_client
        config = cls.os.config
        cls.zone = cls.ec2_client.get_good_zone()
        cls.materials_path = config.boto.s3_materials_path
        ami_manifest = config.boto.ami_manifest
        aki_manifest = config.boto.aki_manifest
        ari_manifest = config.boto.ari_manifest
        cls.instance_type = config.boto.instance_type
        cls.bucket_name = rand_name("s3bucket-")
        cls.keypair_name = rand_name("keypair-")
        cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
        cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
                               cls.keypair_name)
        bucket = cls.s3_client.create_bucket(cls.bucket_name)
        cls.addResourceCleanUp(cls.destroy_bucket,
                               cls.s3_client.connection_data,
                               cls.bucket_name)
        s3_upload_dir(bucket, cls.materials_path)
        cls.images = {"ami":
                      {"name": rand_name("ami-name-"),
                       "location": cls.bucket_name + "/" + ami_manifest},
                      "aki":
                      {"name": rand_name("aki-name-"),
                       "location": cls.bucket_name + "/" + aki_manifest},
                      "ari":
                      {"name": rand_name("ari-name-"),
                       "location": cls.bucket_name + "/" + ari_manifest}}
        for image in cls.images.itervalues():
            image["image_id"] = cls.ec2_client.register_image(
                name=image["name"],
                image_location=image["location"])
            cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                                   image["image_id"])

        for image in cls.images.itervalues():
            def _state():
                retr = cls.ec2_client.get_image(image["image_id"])
                return retr.state
            state = state_wait(_state, "available")
            if state != "available":
                for _image in cls.images.itervalues():
                    cls.ec2_client.deregister_image(_image["image_id"])
                raise EC2RegisterImageException(image_id=image["image_id"])

    @attr(type='smoke')
    def test_run_stop_terminate_instance(self):
        """EC2 run, stop and terminate instance"""
        image_ami = self.ec2_client.get_image(self.images["ami"]
                                              ["image_id"])
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type)
        rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)

        def _state():
            instance.update(validate=True)
            return instance.state

        for instance in reservation.instances:
            LOG.info("state: %s", instance.state)
            if instance.state != "running":
                self.assertInstanceStateWait(_state, "running")

        for instance in reservation.instances:
            instance.stop()
            LOG.info("state: %s", instance.state)
            if instance.state != "stopped":
                self.assertInstanceStateWait(_state, "stopped")

        for instance in reservation.instances:
            instance.terminate()
        self.cancelResourceCleanUp(rcuk)

    @attr(type='smoke')
    def test_run_terminate_instance(self):
        """EC2 run, terminate immediately"""
        image_ami = self.ec2_client.get_image(self.images["ami"]
                                              ["image_id"])
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type)

        for instance in reservation.instances:
            instance.terminate()

        instance.update(validate=True)
        self.assertNotEqual(instance.state, "running")

    #NOTE(afazekas): doctored test case; with normal (strict) validation
    #                it would fail
    @attr("slow", type='smoke')
    def test_integration_1(self):
        """EC2 integration test 1 (not strict)"""
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = rand_name("securitygroup-")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                                group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        self.ec2_client.authorize_security_group(sec_group_name,
                                                 ip_protocol="icmp",
                                                 cidr_ip="0.0.0.0/0",
                                                 from_port=-1,
                                                 to_port=-1)
        self.ec2_client.authorize_security_group(sec_group_name,
                                                 ip_protocol="tcp",
                                                 cidr_ip="0.0.0.0/0",
                                                 from_port=22,
                                                 to_port=22)
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))
        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        volume = self.ec2_client.create_volume(1, self.zone)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]

        def _instance_state():
            instance.update(validate=True)
            return instance.state

        def _volume_state():
            volume.update(validate=True)
            return volume.status

        LOG.info("state: %s", instance.state)
        if instance.state != "running":
            self.assertInstanceStateWait(_instance_state, "running")

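        # Make the guest reachable: allocate a floating IP and associate it
        # with the instance (the cleanups registered below make sure the
        # address is always released).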
        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        address.associate(instance.id)

        rcuk_da = self.addResourceCleanUp(address.disassociate)
        #TODO(afazekas): ping test. dependency/permission?

        self.assertVolumeStatusWait(_volume_state, "available")
        #NOTE(afazekas): it may report "available" before it actually is

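        # Log in over SSH with the generated keypair, write a marker line to
        # the guest console and wait for it to show up in the instance's
        # console output.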
        ssh = RemoteClient(address.public_ip,
                           self.os.config.compute.ssh_user,
                           pkey=self.keypair.material)
        text = rand_name("Pattern text for console output -")
        resp = ssh.write_to_console(text)
        self.assertFalse(resp)

        def _output():
            output = instance.get_console_output()
            return output.output

        re_search_wait(_output, text)
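        # Snapshot the guest's partition list before attaching the volume so
        # the new block device can later be detected from inside the guest.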
        part_lines = ssh.get_partitions().split('\n')
        # "attaching" is not a valid EC2 volume state (bug #1074901)
        volume.attach(instance.id, "/dev/vdh")

        #self.assertVolumeStatusWait(_volume_state, "in-use")  # #1074901
        re_search_wait(_volume_state, "in-use")

        #NOTE(afazekas): Different hypervisor backends name the devices
        #                differently, so we only check whether the number of
        #                partitions increased or decreased

        def _part_state():
            current = ssh.get_partitions().split('\n')
            if len(current) > len(part_lines):
                return 'INCREASE'
            if len(current) < len(part_lines):
                return 'DECREASE'
            return 'EQUAL'

        state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')

        #TODO(afazekas): Compare the guest resources to the flavor settings

        volume.detach()  # "detaching" is an invalid EC2 status, bug #1074901

        #self.assertVolumeStatusWait(_volume_state, "available")
        re_search_wait(_volume_state, "available")
        LOG.info("Volume %s state: %s", volume.id, volume.status)

        state_wait(_part_state, 'DECREASE')

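        # Teardown exercised explicitly: stop the instance, then disassociate
        # and release the floating IP, cancelling the corresponding cleanup
        # callbacks.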
        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)

        LOG.info("state: %s", instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(_instance_state, "stopped")
        #TODO(afazekas): move steps from teardown to the test case


#TODO(afazekas): Snapshot/volume read/write test case