# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from cinderclient import exceptions as cinder_exceptions
import testtools

from tempest.common.utils.data_utils import rand_name
from tempest.common.utils.linux.remote_client import RemoteClient
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.scenario import manager
import tempest.test

LOG = logging.getLogger(__name__)


class TestStampPattern(manager.OfficialClientTest):
    """
    This test snapshots an instance and a volume, then attaches a volume
    created from the volume snapshot to an instance booted from the instance
    snapshot.
    The following is the scenario outline:
    1. Boot an instance "instance1"
    2. Create a volume "volume1"
    3. Attach volume1 to instance1
    4. Create a filesystem on volume1
    5. Mount volume1
    6. Create a file containing a timestamp on volume1
    7. Unmount volume1
    8. Detach volume1 from instance1
    9. Create a snapshot "snapshot_from_volume" of volume1
    10. Create a snapshot "snapshot_from_instance" of instance1
    11. Boot an instance "instance2" from snapshot_from_instance
    12. Create a volume "volume2" from snapshot_from_volume
    13. Attach volume2 to instance2
    14. Check that the file created in step 6 exists on volume2
    """

    def _wait_for_server_status(self, server, status):
        self.status_timeout(self.compute_client.servers,
                            server.id,
                            status)

    def _wait_for_image_status(self, image_id, status):
        self.status_timeout(self.image_client.images, image_id, status)

    def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
        self.status_timeout(self.volume_client.volume_snapshots,
                            volume_snapshot.id, status)

    def _boot_image(self, image_id):
        name = rand_name('scenario-server-')
        client = self.compute_client
        flavor_id = self.config.compute.flavor_ref
        LOG.debug("name:%s, image:%s" % (name, image_id))
        server = client.servers.create(name=name,
                                       image=image_id,
                                       flavor=flavor_id,
                                       key_name=self.keypair.name)
        self.addCleanup(self.compute_client.servers.delete, server)
        self.assertEqual(name, server.name)
        self._wait_for_server_status(server, 'ACTIVE')
        server = client.servers.get(server)  # getting network information
        LOG.debug("server:%s" % server)
        return server

    def _add_keypair(self):
        name = rand_name('scenario-keypair-')
        self.keypair = self.compute_client.keypairs.create(name=name)
        self.addCleanup(self.compute_client.keypairs.delete, self.keypair)
        self.assertEqual(name, self.keypair.name)

    def _create_floating_ip(self):
        floating_ip = self.compute_client.floating_ips.create()
        self.addCleanup(floating_ip.delete)
        return floating_ip

    def _add_floating_ip(self, server, floating_ip):
        server.add_floating_ip(floating_ip)

    def _create_security_group_rule(self):
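        # Use the tenant's default security group and open SSH (TCP/22)
        # so the test can reach the instances it boots over the network.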
        sgs = self.compute_client.security_groups.list()
        for sg in sgs:
            if sg.name == 'default':
                secgroup = sg

        ruleset = {
            # ssh
            'ip_protocol': 'tcp',
            'from_port': 22,
            'to_port': 22,
            'cidr': '0.0.0.0/0',
            'group_id': None
        }
        sg_rule = self.compute_client.security_group_rules.create(secgroup.id,
                                                                   **ruleset)
        self.addCleanup(self.compute_client.security_group_rules.delete,
                        sg_rule.id)

    def _remote_client_to_server(self, server_or_ip):
        if isinstance(server_or_ip, basestring):
            ip = server_or_ip
        else:
            network_name_for_ssh = self.config.compute.network_for_ssh
            ip = server_or_ip.networks[network_name_for_ssh][0]
        username = self.config.scenario.ssh_user
        linux_client = RemoteClient(ip,
                                    username,
                                    pkey=self.keypair.private_key)
        return linux_client

    def _ssh_to_server(self, server_or_ip):
        linux_client = self._remote_client_to_server(server_or_ip)
        return linux_client.ssh_client

    def _create_image(self, server):
        snapshot_name = rand_name('scenario-snapshot-')
        create_image_client = self.compute_client.servers.create_image
        image_id = create_image_client(server, snapshot_name)
        self.addCleanup(self.image_client.images.delete, image_id)
        self._wait_for_server_status(server, 'ACTIVE')
        self._wait_for_image_status(image_id, 'active')
        snapshot_image = self.image_client.images.get(image_id)
        self.assertEqual(snapshot_name, snapshot_image.name)
        return image_id

    def _create_volume_snapshot(self, volume):
        snapshot_name = rand_name('scenario-snapshot-')
        volume_snapshots = self.volume_client.volume_snapshots
        snapshot = volume_snapshots.create(
            volume.id, display_name=snapshot_name)

        def cleaner():
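            # Delete the snapshot and poll until Cinder no longer reports
            # it; the parent volume cannot be deleted while a snapshot of
            # it still exists.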
            volume_snapshots.delete(snapshot)
            try:
                while volume_snapshots.get(snapshot.id):
                    time.sleep(1)
            except cinder_exceptions.NotFound:
                pass
        self.addCleanup(cleaner)
        self._wait_for_volume_status(volume, 'available')
        self._wait_for_volume_snapshot_status(snapshot, 'available')
        self.assertEqual(snapshot_name, snapshot.display_name)
        return snapshot

    def _wait_for_volume_status(self, volume, status):
        self.status_timeout(
            self.volume_client.volumes, volume.id, status)

    def _create_volume(self, snapshot_id=None):
        name = rand_name('scenario-volume-')
        LOG.debug("volume display-name:%s" % name)
        volume = self.volume_client.volumes.create(size=1,
                                                   display_name=name,
                                                   snapshot_id=snapshot_id)
        LOG.debug("volume created:%s" % volume.display_name)

        def cleaner():
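            # A volume can only be deleted once it is 'available' again,
            # so wait for any in-progress detach to complete first.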
            self._wait_for_volume_status(volume, 'available')
            self.volume_client.volumes.delete(volume)
        self.addCleanup(cleaner)
        self._wait_for_volume_status(volume, 'available')
        self.assertEqual(name, volume.display_name)
        return volume

    def _attach_volume(self, server, volume):
        attach_volume_client = self.compute_client.volumes.create_server_volume
        attached_volume = attach_volume_client(server.id,
                                               volume.id,
                                               '/dev/vdb')
        self.assertEqual(volume.id, attached_volume.id)
        self._wait_for_volume_status(attached_volume, 'in-use')

    def _detach_volume(self, server, volume):
        detach_volume_client = self.compute_client.volumes.delete_server_volume
        detach_volume_client(server.id, volume.id)
        self._wait_for_volume_status(volume, 'available')

    def _wait_for_volume_available_on_the_system(self, server_or_ip):
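        # The attach call can return before the new block device is visible
        # inside the guest, so poll the partition list over SSH until the
        # volume (vdb) shows up.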
        ssh = self._remote_client_to_server(server_or_ip)
        conf = self.config

        def _func():
            part = ssh.get_partitions()
            LOG.debug("Partitions:%s" % part)
            return 'vdb' in part

        if not tempest.test.call_until_true(_func,
                                            conf.compute.build_timeout,
                                            conf.compute.build_interval):
            raise exceptions.TimeoutException

    def _create_timestamp(self, server_or_ip):
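        # Format the attached volume, mount it and write the current time
        # to a timestamp file, then unmount; the file is re-read later from
        # a volume created from this volume's snapshot.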
        ssh_client = self._ssh_to_server(server_or_ip)
        ssh_client.exec_command('sudo /usr/sbin/mkfs.ext4 /dev/vdb')
        ssh_client.exec_command('sudo mount /dev/vdb /mnt')
        ssh_client.exec_command('sudo sh -c "date > /mnt/timestamp;sync"')
        self.timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
        ssh_client.exec_command('sudo umount /mnt')

    def _check_timestamp(self, server_or_ip):
        ssh_client = self._ssh_to_server(server_or_ip)
        ssh_client.exec_command('sudo mount /dev/vdb /mnt')
        got_timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
        self.assertEqual(self.timestamp, got_timestamp)

    @testtools.skip("Until Bug #1205344 is fixed")
    def test_stamp_pattern(self):
        # prepare for booting an instance
        self._add_keypair()
        self._create_security_group_rule()

        # boot an instance and create a timestamp file in it
        volume = self._create_volume()
        server = self._boot_image(self.config.compute.image_ref)

        # create and add floating IP to server1
        if self.config.compute.use_floatingip_for_ssh:
            floating_ip_for_server = self._create_floating_ip()
            self._add_floating_ip(server, floating_ip_for_server)
            ip_for_server = floating_ip_for_server.ip
        else:
            ip_for_server = server

        self._attach_volume(server, volume)
        self._wait_for_volume_available_on_the_system(ip_for_server)
        self._create_timestamp(ip_for_server)
        self._detach_volume(server, volume)

        # snapshot the volume
        volume_snapshot = self._create_volume_snapshot(volume)

        # snapshot the instance
        snapshot_image_id = self._create_image(server)

        # create a second volume (volume2) from the volume snapshot
        volume_from_snapshot = self._create_volume(
            snapshot_id=volume_snapshot.id)

        # boot a second instance (instance2) from the instance snapshot
        server_from_snapshot = self._boot_image(snapshot_image_id)

        # create and add floating IP to server_from_snapshot
        if self.config.compute.use_floatingip_for_ssh:
            floating_ip_for_snapshot = self._create_floating_ip()
            self._add_floating_ip(server_from_snapshot,
                                  floating_ip_for_snapshot)
            ip_for_snapshot = floating_ip_for_snapshot.ip
        else:
            ip_for_snapshot = server_from_snapshot

        # attach volume2 to instance2
        self._attach_volume(server_from_snapshot, volume_from_snapshot)
        self._wait_for_volume_available_on_the_system(ip_for_snapshot)

        # check that the timestamp file exists in volume2
        self._check_timestamp(ip_for_snapshot)