| 1 | commit 5563b42f8ba435bc45e08399c77ac350156d9aa2
| 2 | Author: Peter Penchev <openstack-dev@storpool.com> |
| 3 | Date: Wed Apr 20 15:47:39 2022 +0300 |
| 4 | |
| 5 | StorPool: create_cloned_volume() improvements |
| 6 | |
| 7 | If the source and destination volumes are in the same StorPool template |
| 8 | (as defined by either the volume type or the global config setting), |
| 9 | skip creating the transient snapshot altogether and use
| 10 | StorPool's "base this volume on that one" API call (which does the same |
| 11 | thing internally, but much more efficiently and atomically). |
| 12 | |
| 13 | If the destination volume should be in a different StorPool template, |
| 14 | then make sure that the transient snapshot is also in that template so |
| 15 | that, if other volumes are cloned from the same source volume later, |
| 16 | they can all use the same data underneath (the internal workings of |
| 17 | StorPool will detect that all those snapshots are exactly the same and |
| 18 | not duplicate any data in the destination template). This avoids data
| 19 | duplication, sometimes saving a drastic amount of space.
| 20 | |
| 21 | Bump the minimum required version of the "storpool" third-party library |
| 22 | for snapshotUpdate(template=...) support. |
| 23 | |
| 24 | Change-Id: Ib9bb76cf2e2f2b035b92e596b1ef185558b190d6 |
| 25 | |
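In short, the cloning flow described above boils down to the following rough sketch
(the helper name, its arguments, and the snapshot naming here are illustrative
assumptions, not the driver's exact code; the real implementation is in the diff below):

    def clone_volume(api, src_name, dst_name, size, src_template, dst_template):
        if src_template == dst_template:
            # Same template: base the new volume directly on the source one;
            # no transient snapshot is needed at all.
            api.volumeCreate({'name': dst_name, 'size': size, 'baseOn': src_name})
            return

        # Different templates: create a transient snapshot, move it into the
        # destination template, and create the new volume from it.  The snapshot
        # is left in place so that later clones of the same source volume can
        # reuse its data instead of copying it again.
        snap_name = 'clone-' + dst_name
        api.snapshotCreate(src_name, {'name': snap_name})
        api.snapshotUpdate(snap_name, {'template': dst_template})
        api.volumeCreate({'name': dst_name, 'size': size, 'parent': snap_name})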
| 26 | diff --git a/cinder/tests/unit/volume/drivers/test_storpool.py b/cinder/tests/unit/volume/drivers/test_storpool.py |
| 27 | index 843283db4..51db7f292 100644 |
| 28 | --- a/cinder/tests/unit/volume/drivers/test_storpool.py |
| 29 | +++ b/cinder/tests/unit/volume/drivers/test_storpool.py |
| 30 | @@ -1,4 +1,4 @@ |
| 31 | -# Copyright 2014 - 2017, 2019 - 2021 StorPool |
| 32 | +# Copyright 2014 - 2017, 2019 - 2022 StorPool |
| 33 | # All Rights Reserved. |
| 34 | # |
| 35 | # Licensed under the Apache License, Version 2.0 (the "License"); you may |
| 36 | @@ -14,6 +14,7 @@ |
| 37 | # under the License. |
| 38 | |
| 39 | |
| 40 | +import itertools |
| 41 | import re |
| 42 | import sys |
| 43 | from unittest import mock |
| 44 | @@ -110,13 +111,33 @@ class MockAPI(object): |
| 45 | def snapshotCreate(self, vname, snap): |
| 46 | snapshots[snap['name']] = dict(volumes[vname]) |
| 47 | |
| 48 | + def snapshotUpdate(self, snap, data): |
| 49 | + sdata = snapshots[snap] |
| 50 | + sdata.update(data) |
| 51 | + |
| 52 | def snapshotDelete(self, name): |
| 53 | del snapshots[name] |
| 54 | |
| 55 | def volumeCreate(self, v): |
| 56 | - if v['name'] in volumes: |
| 57 | + name = v['name'] |
| 58 | + if name in volumes: |
| 59 | raise MockApiError('volume already exists') |
| 60 | - volumes[v['name']] = v |
| 61 | + data = dict(v) |
| 62 | + |
| 63 | + if 'parent' in v and 'template' not in v: |
| 64 | + sdata = snapshots[v['parent']] |
| 65 | + if 'template' in sdata: |
| 66 | + data['template'] = sdata['template'] |
| 67 | + |
| 68 | + if 'baseOn' in v and 'template' not in v: |
| 69 | + vdata = volumes[v['baseOn']] |
| 70 | + if 'template' in vdata: |
| 71 | + data['template'] = vdata['template'] |
| 72 | + |
| 73 | + if 'template' not in data: |
| 74 | + data['template'] = None |
| 75 | + |
| 76 | + volumes[name] = data |
| 77 | |
| 78 | def volumeDelete(self, name): |
| 79 | del volumes[name] |
| 80 | @@ -171,6 +192,23 @@ fakeStorPool.spopenstack.AttachDB = MockAttachDB |
| 81 | fakeStorPool.sptypes.VolumeUpdateDesc = MockVolumeUpdateDesc |
| 82 | |
| 83 | |
| 84 | +class MockVolumeDB(object): |
| 85 | + """Simulate a Cinder database with a volume_get() method.""" |
| 86 | + |
| 87 | + def __init__(self, vol_types=None): |
| 88 | + """Store the specified volume types mapping if necessary.""" |
| 89 | + self.vol_types = vol_types if vol_types is not None else {} |
| 90 | + |
| 91 | + def volume_get(self, _context, vid): |
| 92 | + """Get a volume-like structure, only the fields we care about.""" |
| 93 | + # Still, try to at least make sure we know about that volume |
| 94 | + return { |
| 95 | + 'id': vid, |
| 96 | + 'size': volumes[volumeName(vid)]['size'], |
| 97 | + 'volume_type': self.vol_types.get(vid), |
| 98 | + } |
| 99 | + |
| 100 | + |
| 101 | @ddt.ddt |
| 102 | class StorPoolTestCase(test.TestCase): |
| 103 | |
| 104 | @@ -260,6 +298,11 @@ class StorPoolTestCase(test.TestCase): |
| 105 | self.assertListEqual(sorted([volumeName(n) for n in names]), |
| 106 | sorted(volumes.keys())) |
| 107 | |
| 108 | + def assertSnapshotNames(self, specs): |
| 109 | + self.assertListEqual( |
| 110 | + sorted(snapshotName(spec[0], spec[1]) for spec in specs), |
| 111 | + sorted(snapshots.keys())) |
| 112 | + |
| 113 | @mock_volume_types |
| 114 | def test_create_delete_volume(self): |
| 115 | self.assertVolumeNames([]) |
| 116 | @@ -272,7 +315,7 @@ class StorPoolTestCase(test.TestCase): |
| 117 | self.assertVolumeNames(('1',)) |
| 118 | v = volumes[volumeName('1')] |
| 119 | self.assertEqual(1 * units.Gi, v['size']) |
| 120 | - self.assertNotIn('template', v.keys()) |
| 121 | + self.assertIsNone(v['template']) |
| 122 | self.assertEqual(3, v['replication']) |
| 123 | |
| 124 | caught = False |
| 125 | @@ -292,7 +335,7 @@ class StorPoolTestCase(test.TestCase): |
| 126 | self.assertVolumeNames(('1',)) |
| 127 | v = volumes[volumeName('1')] |
| 128 | self.assertEqual(2 * units.Gi, v['size']) |
| 129 | - self.assertNotIn('template', v.keys()) |
| 130 | + self.assertIsNone(v['template']) |
| 131 | self.assertEqual(3, v['replication']) |
| 132 | |
| 133 | self.driver.create_volume({'id': '2', 'name': 'v2', 'size': 3, |
| 134 | @@ -300,7 +343,7 @@ class StorPoolTestCase(test.TestCase): |
| 135 | self.assertVolumeNames(('1', '2')) |
| 136 | v = volumes[volumeName('2')] |
| 137 | self.assertEqual(3 * units.Gi, v['size']) |
| 138 | - self.assertNotIn('template', v.keys()) |
| 139 | + self.assertIsNone(v['template']) |
| 140 | self.assertEqual(3, v['replication']) |
| 141 | |
| 142 | self.driver.create_volume({'id': '3', 'name': 'v2', 'size': 4, |
| 143 | @@ -322,7 +365,7 @@ class StorPoolTestCase(test.TestCase): |
| 144 | # Make sure the dictionary is not corrupted somehow... |
| 145 | v = volumes[volumeName('1')] |
| 146 | self.assertEqual(2 * units.Gi, v['size']) |
| 147 | - self.assertNotIn('template', v.keys()) |
| 148 | + self.assertIsNone(v['template']) |
| 149 | self.assertEqual(3, v['replication']) |
| 150 | |
| 151 | for vid in ('1', '2', '3', '4'): |
| 152 | @@ -386,16 +429,17 @@ class StorPoolTestCase(test.TestCase): |
| 153 | self.driver.extend_volume({'id': '1'}, 2) |
| 154 | self.assertEqual(2 * units.Gi, volumes[volumeName('1')]['size']) |
| 155 | |
| 156 | - self.driver.create_cloned_volume({'id': '2', 'name': 'clo', 'size': 3}, |
| 157 | - {'id': 1}) |
| 158 | + with mock.patch.object(self.driver, 'db', new=MockVolumeDB()): |
| 159 | + self.driver.create_cloned_volume( |
| 160 | + {'id': '2', 'name': 'clo', 'size': 3, 'volume_type': None}, |
| 161 | + {'id': 1}) |
| 162 | self.assertVolumeNames(('1', '2')) |
| 163 | self.assertDictEqual({}, snapshots) |
| 164 | - # Note: this would not be true in a real environment (the snapshot will |
| 165 | - # have been deleted, the volume would have no parent), but with this |
| 166 | - # fake implementation it helps us make sure that the second volume was |
| 167 | - # created with the proper options. |
| 168 | - self.assertEqual(volumes[volumeName('2')]['parent'], |
| 169 | - snapshotName('clone', '2')) |
| 170 | + # We do not provide a StorPool template name in either of the volumes' |
| 171 | + # types, so create_cloned_volume() should take the baseOn shortcut. |
| 172 | + vol2 = volumes[volumeName('2')] |
| 173 | + self.assertEqual(vol2['baseOn'], volumeName('1')) |
| 174 | + self.assertNotIn('parent', vol2) |
| 175 | |
| 176 | self.driver.delete_volume({'id': 1}) |
| 177 | self.driver.delete_volume({'id': 2}) |
| 178 | @@ -403,6 +447,78 @@ class StorPoolTestCase(test.TestCase): |
| 179 | self.assertDictEqual({}, volumes) |
| 180 | self.assertDictEqual({}, snapshots) |
| 181 | |
| 182 | + @ddt.data(*itertools.product( |
| 183 | + [None] + [{'id': key} for key in sorted(volume_types.keys())], |
| 184 | + [None] + [{'id': key} for key in sorted(volume_types.keys())])) |
| 185 | + @ddt.unpack |
| 186 | + @mock_volume_types |
| 187 | + def test_create_cloned_volume(self, src_type, dst_type): |
| 188 | + self.assertDictEqual({}, volumes) |
| 189 | + self.assertDictEqual({}, snapshots) |
| 190 | + |
| 191 | + src_template = ( |
| 192 | + None |
| 193 | + if src_type is None |
| 194 | + else volume_types[src_type['id']].get('storpool_template') |
| 195 | + ) |
| 196 | + dst_template = ( |
| 197 | + None |
| 198 | + if dst_type is None |
| 199 | + else volume_types[dst_type['id']].get('storpool_template') |
| 200 | + ) |
| 201 | + src_name = 's-none' if src_template is None else 's-' + src_template |
| 202 | + dst_name = 'd-none' if dst_template is None else 'd-' + dst_template |
| 203 | + |
| 204 | + snap_name = snapshotName('clone', '2') |
| 205 | + |
| 206 | + vdata1 = { |
| 207 | + 'id': '1', |
| 208 | + 'name': src_name, |
| 209 | + 'size': 1, |
| 210 | + 'volume_type': src_type, |
| 211 | + } |
| 212 | + self.assertEqual( |
| 213 | + self.driver._template_from_volume(vdata1), |
| 214 | + src_template) |
| 215 | + self.driver.create_volume(vdata1) |
| 216 | + self.assertVolumeNames(('1',)) |
| 217 | + |
| 218 | + vdata2 = { |
| 219 | + 'id': 2, |
| 220 | + 'name': dst_name, |
| 221 | + 'size': 1, |
| 222 | + 'volume_type': dst_type, |
| 223 | + } |
| 224 | + self.assertEqual( |
| 225 | + self.driver._template_from_volume(vdata2), |
| 226 | + dst_template) |
| 227 | + with mock.patch.object(self.driver, 'db', |
| 228 | + new=MockVolumeDB(vol_types={'1': src_type})): |
| 229 | + self.driver.create_cloned_volume(vdata2, {'id': '1'}) |
| 230 | + self.assertVolumeNames(('1', '2')) |
| 231 | + vol2 = volumes[volumeName('2')] |
| 232 | + self.assertEqual(vol2['template'], dst_template) |
| 233 | + |
| 234 | + if src_template == dst_template: |
| 235 | + self.assertEqual(vol2['baseOn'], volumeName('1')) |
| 236 | + self.assertNotIn('parent', vol2) |
| 237 | + |
| 238 | + self.assertDictEqual({}, snapshots) |
| 239 | + else: |
| 240 | + self.assertNotIn('baseOn', vol2) |
| 241 | + self.assertEqual(vol2['parent'], snap_name) |
| 242 | + |
| 243 | + self.assertSnapshotNames((('clone', '2'),)) |
| 244 | + self.assertEqual(snapshots[snap_name]['template'], dst_template) |
| 245 | + |
| 246 | + self.driver.delete_volume({'id': '1'}) |
| 247 | + self.driver.delete_volume({'id': '2'}) |
| 248 | + if src_template != dst_template: |
| 249 | + del snapshots[snap_name] |
| 250 | + |
| 251 | + self.assertDictEqual({}, volumes) |
| 252 | + self.assertDictEqual({}, snapshots) |
| 253 | + |
| 254 | @mock_volume_types |
| 255 | def test_config_replication(self): |
| 256 | self.assertVolumeNames([]) |
| 257 | @@ -422,7 +538,7 @@ class StorPoolTestCase(test.TestCase): |
| 258 | self.assertVolumeNames(('cfgrepl1',)) |
| 259 | v = volumes[volumeName('cfgrepl1')] |
| 260 | self.assertEqual(3, v['replication']) |
| 261 | - self.assertNotIn('template', v) |
| 262 | + self.assertIsNone(v['template']) |
| 263 | self.driver.delete_volume({'id': 'cfgrepl1'}) |
| 264 | |
| 265 | self.driver.configuration.storpool_replication = 2 |
| 266 | @@ -436,7 +552,7 @@ class StorPoolTestCase(test.TestCase): |
| 267 | self.assertVolumeNames(('cfgrepl2',)) |
| 268 | v = volumes[volumeName('cfgrepl2')] |
| 269 | self.assertEqual(2, v['replication']) |
| 270 | - self.assertNotIn('template', v) |
| 271 | + self.assertIsNone(v['template']) |
| 272 | self.driver.delete_volume({'id': 'cfgrepl2'}) |
| 273 | |
| 274 | self.driver.create_volume({'id': 'cfgrepl3', 'name': 'v1', 'size': 1, |
| 275 | @@ -468,7 +584,7 @@ class StorPoolTestCase(test.TestCase): |
| 276 | self.assertVolumeNames(('cfgtempl1',)) |
| 277 | v = volumes[volumeName('cfgtempl1')] |
| 278 | self.assertEqual(3, v['replication']) |
| 279 | - self.assertNotIn('template', v) |
| 280 | + self.assertIsNone(v['template']) |
| 281 | self.driver.delete_volume({'id': 'cfgtempl1'}) |
| 282 | |
| 283 | self.driver.create_volume({'id': 'cfgtempl2', 'name': 'v1', 'size': 1, |
| 284 | diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py |
| 285 | index 58ceed1be..36ad98ded 100644 |
| 286 | --- a/cinder/volume/drivers/storpool.py |
| 287 | +++ b/cinder/volume/drivers/storpool.py |
| 288 | @@ -1,4 +1,4 @@ |
| 289 | -# Copyright (c) 2014 - 2021 StorPool |
| 290 | +# Copyright (c) 2014 - 2022 StorPool |
| 291 | # All Rights Reserved. |
| 292 | # |
| 293 | # Licensed under the Apache License, Version 2.0 (the "License"); you may |
| 294 | @@ -19,11 +19,13 @@ import platform |
| 295 | |
| 296 | from oslo_config import cfg |
| 297 | from oslo_log import log as logging |
| 298 | +from oslo_utils import excutils |
| 299 | from oslo_utils import importutils |
| 300 | from oslo_utils import units |
| 301 | import six |
| 302 | |
| 303 | from cinder.common import constants |
| 304 | +from cinder import context |
| 305 | from cinder import exception |
| 306 | from cinder.i18n import _ |
| 307 | from cinder import interface |
| 308 | @@ -94,6 +96,7 @@ class StorPoolDriver(driver.VolumeDriver): |
| 309 | 2.0.0 - Drop _attach_volume() and _detach_volume(), our os-brick |
| 310 | connector will handle this. |
| 311 | - Drop backup_volume() |
| 312 | + - Avoid data duplication in create_cloned_volume() |
| 313 | """ |
| 314 | |
| 315 | VERSION = '2.0.0' |
| 316 | @@ -200,30 +203,80 @@ class StorPoolDriver(driver.VolumeDriver): |
| 317 | |
| 318 | def create_cloned_volume(self, volume, src_vref): |
| 319 | refname = self._attach.volumeName(src_vref['id']) |
| 320 | + size = int(volume['size']) * units.Gi |
| 321 | + volname = self._attach.volumeName(volume['id']) |
| 322 | + |
| 323 | + src_volume = self.db.volume_get( |
| 324 | + context.get_admin_context(), |
| 325 | + src_vref['id'], |
| 326 | + ) |
| 327 | + src_template = self._template_from_volume(src_volume) |
| 328 | + |
| 329 | + template = self._template_from_volume(volume) |
| 330 | + LOG.debug('clone volume id %(vol_id)s template %(template)s', { |
| 331 | + 'vol_id': repr(volume['id']), |
| 332 | + 'template': repr(template), |
| 333 | + }) |
| 334 | + if template == src_template: |
| 335 | + LOG.info('Using baseOn to clone a volume into the same template') |
| 336 | + try: |
| 337 | + self._attach.api().volumeCreate({ |
| 338 | + 'name': volname, |
| 339 | + 'size': size, |
| 340 | + 'baseOn': refname, |
| 341 | + }) |
| 342 | + except spapi.ApiError as e: |
| 343 | + raise self._backendException(e) |
| 344 | + |
| 345 | + return None |
| 346 | + |
| 347 | snapname = self._attach.snapshotName('clone', volume['id']) |
| 348 | + LOG.info( |
| 349 | + 'A transient snapshot for a %(src)s -> %(dst)s template change', |
| 350 | + {'src': src_template, 'dst': template}) |
| 351 | try: |
| 352 | self._attach.api().snapshotCreate(refname, {'name': snapname}) |
| 353 | except spapi.ApiError as e: |
| 354 | - raise self._backendException(e) |
| 355 | + if e.name != 'objectExists': |
| 356 | + raise self._backendException(e) |
| 357 | |
| 358 | - size = int(volume['size']) * units.Gi |
| 359 | - volname = self._attach.volumeName(volume['id']) |
| 360 | try: |
| 361 | - self._attach.api().volumeCreate({ |
| 362 | - 'name': volname, |
| 363 | - 'size': size, |
| 364 | - 'parent': snapname |
| 365 | - }) |
| 366 | - except spapi.ApiError as e: |
| 367 | - raise self._backendException(e) |
| 368 | - finally: |
| 369 | try: |
| 370 | - self._attach.api().snapshotDelete(snapname) |
| 371 | + self._attach.api().snapshotUpdate( |
| 372 | + snapname, |
| 373 | + {'template': template}, |
| 374 | + ) |
| 375 | + except spapi.ApiError as e: |
| 376 | + raise self._backendException(e) |
| 377 | + |
| 378 | + try: |
| 379 | + self._attach.api().volumeCreate({ |
| 380 | + 'name': volname, |
| 381 | + 'size': size, |
| 382 | + 'parent': snapname |
| 383 | + }) |
| 384 | except spapi.ApiError as e: |
| 385 | - # ARGH! |
| 386 | - LOG.error("Could not delete the temp snapshot %(name)s: " |
| 387 | - "%(msg)s", |
| 388 | - {'name': snapname, 'msg': e}) |
| 389 | + raise self._backendException(e) |
| 390 | + |
| 391 | + try: |
| 392 | + self._attach.api().snapshotUpdate( |
| 393 | + snapname, |
| 394 | + {'tags': {'transient': '1.0'}}, |
| 395 | + ) |
| 396 | + except spapi.ApiError as e: |
| 397 | + raise self._backendException(e) |
| 398 | + except Exception: |
| 399 | + with excutils.save_and_reraise_exception(): |
| 400 | + try: |
| 401 | + LOG.warning( |
| 402 | + 'Something went wrong, removing the transient snapshot' |
| 403 | + ) |
| 404 | + self._attach.api().snapshotDelete(snapname) |
| 405 | + except spapi.ApiError as e: |
| 406 | + LOG.error( |
| 407 | + 'Could not delete the %(name)s snapshot: %(err)s', |
| 408 | + {'name': snapname, 'err': str(e)} |
| 409 | + ) |
| 410 | |
| 411 | def create_export(self, context, volume, connector): |
| 412 | pass |
| 413 | diff --git a/driver-requirements.txt b/driver-requirements.txt |
| 414 | index cc8da8eb5..ec05b01c6 100644 |
| 415 | --- a/driver-requirements.txt |
| 416 | +++ b/driver-requirements.txt |
| 417 | @@ -40,7 +40,7 @@ infi.dtypes.wwn # PSF |
| 418 | infi.dtypes.iqn # PSF |
| 419 | |
| 420 | # Storpool |
| 421 | -storpool>=4.0.0 # Apache-2.0 |
| 422 | +storpool>=7.1.0 # Apache-2.0 |
| 423 | storpool.spopenstack>=2.2.1 # Apache-2.0 |
| 424 | |
| 425 | # Datera |
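As a side note, the bumped requirement can be sanity-checked at runtime with something
along these lines (purely illustrative; the "packaging" dependency and the check itself
are assumptions, not part of this change):

    from importlib import metadata
    from packaging import version

    # snapshotUpdate(template=...) needs the newer "storpool" bindings.
    if version.parse(metadata.version("storpool")) < version.parse("7.1.0"):
        raise RuntimeError(
            "storpool >= 7.1.0 is required for snapshotUpdate(template=...)")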