exp: use the patches that I submitted for review.
Change-Id: I82b077496af46e728080afa3bf18390898f72aac
diff --git a/patches/openstack/cinder/clone-across-pools.patch b/patches/openstack/cinder/clone-across-pools.patch
new file mode 100644
index 0000000..3279d6d
--- /dev/null
+++ b/patches/openstack/cinder/clone-across-pools.patch
@@ -0,0 +1,370 @@
+From 8fafd470b34b4b94d0e6e65f1a6049232f06b9f8 Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Wed, 22 Jun 2022 10:04:31 +0300
+Subject: [PATCH 2/8] Add the clone_across_pools driver capability
+
+Let drivers declare that they can clone a volume into a different pool and
+relax the checks when creating a volume from an image (both with and
+without the image cache).
+
+TODO: document this capability a bit more.
+
+Change-Id: Idbac4dae00f9fa03fb14547a204666721bf67d96
+Implements: blueprint clone-across-pools
+---
+ cinder/image/cache.py | 11 ++-
+ cinder/tests/unit/image/test_cache.py | 14 ++--
+ .../volume/flows/test_create_volume_flow.py | 73 +++++++++++++++++++
+ cinder/volume/flows/manager/create_volume.py | 8 +-
+ cinder/volume/manager.py | 3 +-
+ doc/source/reference/support-matrix.ini | 73 +++++++++++++++++++
+ 6 files changed, 169 insertions(+), 13 deletions(-)
+
+diff --git a/cinder/image/cache.py b/cinder/image/cache.py
+index 6d30210a4..a79451bf2 100644
+--- a/cinder/image/cache.py
++++ b/cinder/image/cache.py
+@@ -34,11 +34,13 @@ class ImageVolumeCache(object):
+ db,
+ volume_api,
+ max_cache_size_gb: int = 0,
+- max_cache_size_count: int = 0):
++ max_cache_size_count: int = 0,
++ clone_across_pools: bool = False):
+ self.db = db
+ self.volume_api = volume_api
+ self.max_cache_size_gb = int(max_cache_size_gb)
+ self.max_cache_size_count = int(max_cache_size_count)
++ self.clone_across_pools = bool(clone_across_pools)
+ self.notifier = rpc.get_notifier('volume', CONF.host)
+
+ def get_by_image_volume(self,
+@@ -55,11 +57,12 @@ class ImageVolumeCache(object):
+ self._notify_cache_eviction(context, cache_entry['image_id'],
+ cache_entry['host'])
+
+- @staticmethod
+- def _get_query_filters(volume_ref: objects.Volume) -> dict:
++ def _get_query_filters(self, volume_ref: objects.Volume) -> dict:
+ if volume_ref.is_clustered:
+ return {'cluster_name': volume_ref.cluster_name}
+- return {'host': volume_ref.host}
++ if not self.clone_across_pools:
++ return {'host': volume_ref.host}
++ return {}
+
+ def get_entry(self,
+ context: context.RequestContext,
+diff --git a/cinder/tests/unit/image/test_cache.py b/cinder/tests/unit/image/test_cache.py
+index c3aba9b28..b8b704e0d 100644
+--- a/cinder/tests/unit/image/test_cache.py
++++ b/cinder/tests/unit/image/test_cache.py
+@@ -42,11 +42,12 @@ class ImageVolumeCacheTestCase(test.TestCase):
+ self.volume.update(vol_params)
+ self.volume_ovo = objects.Volume(self.context, **vol_params)
+
+- def _build_cache(self, max_gb=0, max_count=0):
++ def _build_cache(self, max_gb=0, max_count=0, clone_across_pools=False):
+ cache = image_cache.ImageVolumeCache(self.mock_db,
+ self.mock_volume_api,
+ max_gb,
+- max_count)
++ max_count,
++ clone_across_pools)
+ cache.notifier = self.notifier
+ return cache
+
+@@ -91,9 +92,10 @@ class ImageVolumeCacheTestCase(test.TestCase):
+ self.assertEqual(entry['image_id'], msg['payload']['image_id'])
+ self.assertEqual(1, len(self.notifier.notifications))
+
+- @ddt.data(True, False)
+- def test_get_entry(self, clustered):
+- cache = self._build_cache()
++ @ddt.data((True, True), (True, False), (False, True), (False, False))
++ @ddt.unpack
++ def test_get_entry(self, clustered, clone_across_pools):
++ cache = self._build_cache(clone_across_pools=clone_across_pools)
+ entry = self._build_entry()
+ image_meta = {
+ 'is_public': True,
+@@ -107,7 +109,7 @@ class ImageVolumeCacheTestCase(test.TestCase):
+ image_volume_cache_get_and_update_last_used.return_value) = entry
+ if not clustered:
+ self.volume_ovo.cluster_name = None
+- expect = {'host': self.volume.host}
++ expect = {} if clone_across_pools else {'host': self.volume.host}
+ else:
+ expect = {'cluster_name': self.volume.cluster_name}
+ found_entry = cache.get_entry(self.context,
+diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+index 2a7b625bc..b7d43008f 100644
+--- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py
++++ b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+@@ -1058,6 +1058,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ self, volume_get_by_id, vol_update, rekey_vol, cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1083,6 +1084,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ handle_bootable, cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1108,6 +1110,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ mock_cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1144,6 +1147,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ mock_cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_cache = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+@@ -1192,6 +1196,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ mock_cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1250,6 +1255,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ driver_error):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ backup_host = 'host@backend#pool'
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+@@ -1289,6 +1295,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ def test_create_drive_error(self, mock_message_create):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1490,6 +1497,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+ spec=utils.get_file_spec())
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ mock.MagicMock(), fake_db, fake_driver)
+ fake_image_service = fake_image.FakeImageService()
+@@ -1516,6 +1524,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+ 'cinder_encryption_key_id': None}
+
+ fake_driver.clone_image.return_value = (None, False)
++ fake_db.volume_get_all.return_value = []
+ fake_db.volume_get_all_by_host.return_value = [image_volume]
+
+ fake_manager._create_from_image(self.ctxt,
+@@ -1534,6 +1543,69 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+ self.assertFalse(fake_driver.create_cloned_volume.called)
+ mock_cleanup_cg.assert_called_once_with(volume)
+
++ @mock.patch('cinder.volume.flows.manager.create_volume.'
++ 'CreateVolumeFromSpecTask.'
++ '_cleanup_cg_in_volume')
++ @mock.patch('cinder.image.image_utils.TemporaryImages.fetch')
++ @mock.patch('cinder.volume.flows.manager.create_volume.'
++ 'CreateVolumeFromSpecTask.'
++ '_handle_bootable_volume_glance_meta')
++ @mock.patch('cinder.image.image_utils.qemu_img_info')
++ def test_create_from_image_across(self, mock_qemu_info, handle_bootable,
++ mock_fetch_img, mock_cleanup_cg,
++ format='raw', owner=None,
++ location=True):
++ self.flags(allowed_direct_url_schemes=['cinder'])
++ mock_fetch_img.return_value = mock.MagicMock(
++ spec=utils.get_file_spec())
++ fake_db = mock.MagicMock()
++ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {'clone_across_pools': True}
++ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
++ mock.MagicMock(), fake_db, fake_driver)
++ fake_image_service = fake_image.FakeImageService()
++
++ volume = fake_volume.fake_volume_obj(self.ctxt,
++ host='host@backend#pool')
++ image_volume = fake_volume.fake_volume_obj(self.ctxt,
++ volume_metadata={})
++ image_id = fakes.IMAGE_ID
++ image_info = imageutils.QemuImgInfo()
++ image_info.virtual_size = '1073741824'
++ mock_qemu_info.return_value = image_info
++
++ url = 'cinder://%s' % image_volume['id']
++ image_location = None
++ if location:
++ image_location = (url, [{'url': url, 'metadata': {}}])
++ image_meta = {'id': image_id,
++ 'container_format': 'bare',
++ 'disk_format': format,
++ 'size': 1024,
++ 'owner': owner or self.ctxt.project_id,
++ 'virtual_size': None,
++ 'cinder_encryption_key_id': None}
++
++ fake_driver.clone_image.return_value = (None, False)
++ fake_db.volume_get_all.return_value = [image_volume]
++ fake_db.volume_get_all_by_host.return_value = []
++
++ fake_manager._create_from_image(self.ctxt,
++ volume,
++ image_location,
++ image_id,
++ image_meta,
++ fake_image_service)
++ if format == 'raw' and not owner and location:
++ fake_driver.create_cloned_volume.assert_called_once_with(
++ volume, image_volume)
++ handle_bootable.assert_called_once_with(self.ctxt, volume,
++ image_id=image_id,
++ image_meta=image_meta)
++ else:
++ self.assertFalse(fake_driver.create_cloned_volume.called)
++ mock_cleanup_cg.assert_called_once_with(volume)
++
+ LEGACY_URI = 'cinder://%s' % fakes.VOLUME_ID
+ MULTISTORE_URI = 'cinder://fake-store/%s' % fakes.VOLUME_ID
+
+@@ -1560,6 +1632,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+ spec=utils.get_file_spec())
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ mock.MagicMock(), fake_db, fake_driver)
+ fake_image_service = fake_image.FakeImageService()
+diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
+index 4c18d7d84..187f3b3ed 100644
+--- a/cinder/volume/flows/manager/create_volume.py
++++ b/cinder/volume/flows/manager/create_volume.py
+@@ -732,8 +732,12 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
+ urls = list(set([direct_url]
+ + [loc.get('url') for loc in locations or []]))
+ image_volume_ids = self._extract_cinder_ids(urls)
+- image_volumes = self.db.volume_get_all_by_host(
+- context, volume['host'], filters={'id': image_volume_ids})
++ if self.driver.capabilities.get('clone_across_pools'):
++ image_volumes = self.db.volume_get_all(
++ context, filters={'id': image_volume_ids})
++ else:
++ image_volumes = self.db.volume_get_all_by_host(
++ context, volume['host'], filters={'id': image_volume_ids})
+
+ for image_volume in image_volumes:
+ # For the case image volume is stored in the service tenant,
+diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
+index 804a1c6cb..ecf264d10 100644
+--- a/cinder/volume/manager.py
++++ b/cinder/volume/manager.py
+@@ -356,7 +356,8 @@ class VolumeManager(manager.CleanableManager,
+ self.db,
+ cinder_volume.API(),
+ max_cache_size,
+- max_cache_entries
++ max_cache_entries,
++ self.driver.capabilities.get('clone_across_pools', False)
+ )
+ LOG.info('Image-volume cache enabled for host %(host)s.',
+ {'host': self.host})
+diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
+index a379139b9..38a279266 100644
+--- a/doc/source/reference/support-matrix.ini
++++ b/doc/source/reference/support-matrix.ini
+@@ -957,3 +957,76 @@ driver.vmware=missing
+ driver.win_iscsi=missing
+ driver.win_smb=missing
+ driver.zadara=missing
++
++[operation.clone_across_pools]
++title=Clone a volume into a different pool
++status=optional
++notes=Vendor drivers that support cloning a volume into a different
++ storage pool, e.g. when creating a volume from a Cinder-backed
++ Glance image.
++driver.datera=missing
++driver.dell_emc_powermax=missing
++driver.dell_emc_powerstore=missing
++driver.dell_emc_powervault=missing
++driver.dell_emc_sc=missing
++driver.dell_emc_unity=missing
++driver.dell_emc_vmax_af=missing
++driver.dell_emc_vmax_3=missing
++driver.dell_emc_vnx=missing
++driver.dell_emc_powerflex=missing
++driver.dell_emc_xtremio=missing
++driver.fujitsu_eternus=missing
++driver.hitachi_vsp=missing
++driver.hpe_3par=missing
++driver.hpe_msa=missing
++driver.hpe_nimble=missing
++driver.huawei_t_v1=missing
++driver.huawei_t_v2=missing
++driver.huawei_v3=missing
++driver.huawei_f_v3=missing
++driver.huawei_f_v5=missing
++driver.huawei_v5=missing
++driver.huawei_18000=missing
++driver.huawei_dorado=missing
++driver.huawei_fusionstorage=missing
++driver.infinidat=missing
++driver.ibm_ds8k=missing
++driver.ibm_flashsystem=missing
++driver.ibm_gpfs=missing
++driver.ibm_storwize=missing
++driver.ibm_xiv=missing
++driver.infortrend=missing
++driver.inspur=missing
++driver.inspur_as13000=missing
++driver.kaminario=missing
++driver.kioxia_kumoscale=missing
++driver.lenovo=missing
++driver.lightbits_lightos=missing
++driver.linbit_linstor=missing
++driver.lvm=missing
++driver.macrosan=missing
++driver.nec=missing
++driver.nec_v=missing
++driver.netapp_ontap=missing
++driver.netapp_solidfire=missing
++driver.nexenta=missing
++driver.nfs=missing
++driver.opene_joviandss=missing
++driver.prophetstor=missing
++driver.pure=missing
++driver.qnap=missing
++driver.quobyte=missing
++driver.rbd=missing
++driver.rbd_iscsi=missing
++driver.sandstone=missing
++driver.seagate=missing
++driver.storpool=missing
++driver.synology=missing
++driver.toyou_netstor=missing
++driver.vrtsaccess=missing
++driver.vrtscnfs=missing
++driver.vzstorage=missing
++driver.vmware=missing
++driver.win_iscsi=missing
++driver.win_smb=missing
++driver.zadara=missing
+--
+2.35.1
+
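How a backend opts in: the driver reports the new flag with its volume
stats, and the volume manager picks it up from driver.capabilities (see
the manager.py hunk above). A minimal, hypothetical sketch of the
declaration follows - not a complete driver:

    # Hypothetical example driver; only the capability declaration is
    # the point here, everything else is elided.
    from cinder.volume import driver

    class ExampleDriver(driver.VolumeDriver):
        def _update_volume_stats(self):
            self._stats = {
                'volume_backend_name': 'example',
                'clone_across_pools': True,  # the new capability
                'pools': [],
            }

The StorPool patch later in this series declares the capability in
exactly this way.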
diff --git a/patches/openstack/cinder/glance-upload-uri.patch b/patches/openstack/cinder/glance-upload-uri.patch
new file mode 100644
index 0000000..35964c8
--- /dev/null
+++ b/patches/openstack/cinder/glance-upload-uri.patch
@@ -0,0 +1,243 @@
+From 8f0964a893b98faa6f8fc751a67601c6b58366ed Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Thu, 9 Jun 2022 01:43:28 +0300
+Subject: [PATCH 1/8] Send the correct location URI to the Glance v2 API
+
+When uploading a volume to an image, send the new-style
+cinder://<store-id>/<volume-id> URL to the Glance API if
+image_service:store_id is present in the volume type extra specs.
+
+Closes-Bug: #1978020
+Co-Authored-By: Rajat Dhasmana <rajatdhasmana@gmail.com>
+Change-Id: I815706f691a7d1e5a0c54eb15222417008ef1f34
+---
+ cinder/tests/unit/volume/test_image.py | 2 +-
+ .../tests/unit/volume/test_volume_manager.py | 160 ++++++++++++++++++
+ cinder/volume/manager.py | 4 +-
+ ...20-glance-upload-uri-8fbc70c442ac620c.yaml | 11 ++
+ 4 files changed, 175 insertions(+), 2 deletions(-)
+ create mode 100644 releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml
+
+diff --git a/cinder/tests/unit/volume/test_image.py b/cinder/tests/unit/volume/test_image.py
+index ebc7904c4..2b1b9c392 100644
+--- a/cinder/tests/unit/volume/test_image.py
++++ b/cinder/tests/unit/volume/test_image.py
+@@ -366,7 +366,7 @@ class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
+ def test_copy_volume_to_image_with_image_volume(self):
+ image = self._test_copy_volume_to_image_with_image_volume()
+ self.assertTrue(image['locations'][0]['url'].startswith('cinder://'))
+- image_volume_id = image['locations'][0]['url'][9:]
++ image_volume_id = image['locations'][0]['url'].split('/')[-1]
+ # The image volume does NOT include the snapshot_id, and include the
+ # source_volid which is the uploaded-volume id.
+ vol_ref = db.volume_get(self.context, image_volume_id)
+diff --git a/cinder/tests/unit/volume/test_volume_manager.py b/cinder/tests/unit/volume/test_volume_manager.py
+index 3b751d2cd..547a48fdd 100644
+--- a/cinder/tests/unit/volume/test_volume_manager.py
++++ b/cinder/tests/unit/volume/test_volume_manager.py
+@@ -287,3 +287,163 @@ class VolumeManagerTestCase(base.BaseVolumeTestCase):
+ manager._parse_connection_options(ctxt, vol, conn_info)
+ self.assertIn('cacheable', conn_info['data'])
+ self.assertIs(conn_info['data']['cacheable'], False)
++
++ @mock.patch('cinder.message.api.API.create')
++ @mock.patch('cinder.volume.volume_utils.require_driver_initialized')
++ @mock.patch('cinder.volume.manager.VolumeManager._clone_image_volume')
++ @mock.patch('cinder.db.volume_metadata_update')
++ def test_clone_image_no_volume(self,
++ fake_update,
++ fake_clone,
++ fake_msg_create,
++ fake_init):
++ """Make sure nothing happens if no volume was created."""
++ manager = vol_manager.VolumeManager()
++
++ ctx = mock.sentinel.context
++ volume = fake_volume.fake_volume_obj(ctx)
++ image_service = mock.MagicMock(spec=[])
++
++ fake_clone.return_value = None
++
++ image_meta = {'disk_format': 'raw', 'container_format': 'ova'}
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_not_called()
++ fake_update.assert_not_called()
++
++ image_meta = {'disk_format': 'qcow2', 'container_format': 'bare'}
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_not_called()
++ fake_update.assert_not_called()
++
++ image_meta = {'disk_format': 'raw', 'container_format': 'bare'}
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_called_once_with(ctx, volume, image_meta)
++ fake_update.assert_not_called()
++
++ @mock.patch('cinder.message.api.API.create')
++ @mock.patch('cinder.objects.VolumeType.get_by_id')
++ @mock.patch('cinder.volume.volume_utils.require_driver_initialized')
++ @mock.patch('cinder.volume.manager.VolumeManager._clone_image_volume')
++ @mock.patch('cinder.db.volume_metadata_update')
++ def test_clone_image_no_store_id(self,
++ fake_update,
++ fake_clone,
++ fake_msg_create,
++ fake_volume_type_get,
++ fake_init):
++ """Send a cinder://<volume-id> URL if no store ID in extra specs."""
++ manager = vol_manager.VolumeManager()
++
++ project_id = fake.PROJECT_ID
++
++ ctx = mock.MagicMock()
++ ctx.elevated.return_value = ctx
++ ctx.project_id = project_id
++
++ vol_type = fake_volume.fake_volume_type_obj(
++ ctx,
++ id=fake.VOLUME_TYPE_ID,
++ name=fake.VOLUME_TYPE_NAME,
++ extra_specs={'volume_type_backend': 'unknown'})
++ fake_volume_type_get.return_value = vol_type
++
++ volume = fake_volume.fake_volume_obj(ctx,
++ id=fake.VOLUME_ID,
++ volume_type_id=vol_type.id)
++
++ image_volume_id = fake.VOLUME2_ID
++ image_volume = fake_volume.fake_volume_obj(ctx, id=image_volume_id)
++ url = 'cinder://%(vol)s' % {'vol': image_volume_id}
++
++ image_service = mock.MagicMock(spec=['add_location'])
++ image_meta_id = fake.IMAGE_ID
++ image_meta = {
++ 'id': image_meta_id,
++ 'disk_format': 'raw',
++ 'container_format': 'bare',
++ }
++ image_volume_meta = {
++ 'image_owner': project_id,
++ 'glance_image_id': image_meta_id,
++ }
++
++ fake_clone.return_value = image_volume
++
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_called_once_with(ctx, volume, image_meta)
++ fake_update.assert_called_with(ctx, image_volume_id,
++ image_volume_meta, False)
++ image_service.add_location.assert_called_once_with(ctx, image_meta_id,
++ url, {})
++
++ @mock.patch('cinder.message.api.API.create')
++ @mock.patch('cinder.objects.VolumeType.get_by_id')
++ @mock.patch('cinder.volume.volume_utils.require_driver_initialized')
++ @mock.patch('cinder.volume.manager.VolumeManager._clone_image_volume')
++ @mock.patch('cinder.db.volume_metadata_update')
++ def test_clone_image_with_store_id(self,
++ fake_update,
++ fake_clone,
++ fake_msg_create,
++ fake_volume_type_get,
++ fake_init):
++ """Send a cinder://<store-id>/<volume-id> URL."""
++ manager = vol_manager.VolumeManager()
++
++ project_id = fake.PROJECT_ID
++
++ ctx = mock.MagicMock()
++ ctx.elevated.return_value = ctx
++ ctx.project_id = project_id
++
++ store_id = 'muninn'
++ vol_type = fake_volume.fake_volume_type_obj(
++ ctx,
++ id=fake.VOLUME_TYPE_ID,
++ name=fake.VOLUME_TYPE_NAME,
++ extra_specs={
++ 'volume_type_backend': 'unknown',
++ 'image_service:store_id': store_id,
++ })
++ fake_volume_type_get.return_value = vol_type
++
++ volume = fake_volume.fake_volume_obj(ctx,
++ id=fake.VOLUME_ID,
++ volume_type_id=vol_type.id)
++
++ image_volume_id = '42'
++ image_volume = mock.MagicMock(spec=['id'])
++ image_volume.id = image_volume_id
++ url = 'cinder://%(store)s/%(vol)s' % {
++ 'store': store_id,
++ 'vol': image_volume_id,
++ }
++
++ image_service = mock.MagicMock(spec=['add_location'])
++ image_meta_id = fake.IMAGE_ID
++ image_meta = {
++ 'id': image_meta_id,
++ 'disk_format': 'raw',
++ 'container_format': 'bare',
++ }
++ image_volume_meta = {
++ 'image_owner': project_id,
++ 'glance_image_id': image_meta_id,
++ }
++
++ fake_clone.return_value = image_volume
++
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_called_once_with(ctx, volume, image_meta)
++ fake_update.assert_called_with(ctx, image_volume_id,
++ image_volume_meta, False)
++ image_service.add_location.assert_called_once_with(ctx,
++ image_meta_id,
++ url,
++ {'store': store_id})
+diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
+index dbb73d894..804a1c6cb 100644
+--- a/cinder/volume/manager.py
++++ b/cinder/volume/manager.py
+@@ -1683,7 +1683,6 @@ class VolumeManager(manager.CleanableManager,
+ image_volume_meta,
+ False)
+
+- uri = 'cinder://%s' % image_volume.id
+ image_registered = None
+
+ # retrieve store information from extra-specs
+@@ -1692,6 +1691,9 @@ class VolumeManager(manager.CleanableManager,
+
+ if store_id:
+ location_metadata['store'] = store_id
++ uri = 'cinder://%s/%s' % (store_id, image_volume.id)
++ else:
++ uri = 'cinder://%s' % image_volume.id
+
+ try:
+ image_registered = image_service.add_location(
+diff --git a/releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml b/releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml
+new file mode 100644
+index 000000000..04a909d3d
+--- /dev/null
++++ b/releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml
+@@ -0,0 +1,11 @@
++---
++fixes:
++ - |
++ `Bug #1978020 <https://bugs.launchpad.net/cinder/+bug/1978020>`_: Fixed
++ uploading a volume to a Cinder-backed Glance image; if a store name is set
++ in the volume type's extra specs, it must also be sent to Glance as part of
++ the new image location URI.
++ Please note that while the `image_service:store_id` extra spec is
++ validated when it is set for the volume type, it is not validated later;
++ it is the operator's responsibility to make sure that the Glance store
++ is not renamed or removed, or to update the volume types accordingly.
+--
+2.35.1
+
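For reference, the URI selection that this patch introduces in
cinder/volume/manager.py boils down to the following, restated here as
a standalone, hypothetical helper (the store name and volume ID values
are taken from the tests above):

    def _image_location_uri(store_id, volume_id):
        # With image_service:store_id in the volume type's extra specs,
        # Glance v2 expects the new-style cinder://<store-id>/<volume-id>
        # location; otherwise the legacy cinder://<volume-id> form stays.
        if store_id:
            return 'cinder://%s/%s' % (store_id, volume_id)
        return 'cinder://%s' % volume_id

    assert _image_location_uri('muninn', '42') == 'cinder://muninn/42'
    assert _image_location_uri(None, '42') == 'cinder://42'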
diff --git a/patches/openstack/cinder/storpool-clone-across.patch b/patches/openstack/cinder/storpool-clone-across.patch
new file mode 100644
index 0000000..2bbd2e4
--- /dev/null
+++ b/patches/openstack/cinder/storpool-clone-across.patch
@@ -0,0 +1,49 @@
+From fbda4dbe0f2c495a821cc10dfedbb0b171ac50bc Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Wed, 22 Jun 2022 10:48:25 +0300
+Subject: [PATCH 7/8] StorPool: declare the clone_across_pools capability
+
+Change-Id: I5338c6c4f53a448e495f695cd64b36b722cd947d
+---
+ cinder/volume/drivers/storpool.py | 4 ++++
+ doc/source/reference/support-matrix.ini | 2 +-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
+index 696c86550..401e3709a 100644
+--- a/cinder/volume/drivers/storpool.py
++++ b/cinder/volume/drivers/storpool.py
+@@ -99,6 +99,9 @@ class StorPoolDriver(driver.VolumeDriver):
+ to or from Glance images.
+ - Drop backup_volume()
+ - Avoid data duplication in create_cloned_volume()
++ - Declare the capability to clone a volume into a different
++ pool, thus enabling the use of create_cloned_volume() for
++ Cinder-backed Glance images on StorPool volumes
+ """
+
+ VERSION = '2.0.0'
+@@ -367,6 +370,7 @@ class StorPoolDriver(driver.VolumeDriver):
+ 'driver_version': self.VERSION,
+ 'storage_protocol': constants.STORPOOL,
+
++ 'clone_across_pools': True,
+ 'sparse_copy_volume': True,
+
+ 'pools': pools
+diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
+index 38a279266..4f19c33e5 100644
+--- a/doc/source/reference/support-matrix.ini
++++ b/doc/source/reference/support-matrix.ini
+@@ -1020,7 +1020,7 @@ driver.rbd=missing
+ driver.rbd_iscsi=missing
+ driver.sandstone=missing
+ driver.seagate=missing
+-driver.storpool=missing
++driver.storpool=complete
+ driver.synology=missing
+ driver.toyou_netstor=missing
+ driver.vrtsaccess=missing
+--
+2.35.1
+
diff --git a/patches/openstack/cinder/storpool-clone-volume.patch b/patches/openstack/cinder/storpool-clone-volume.patch
index 8b8c55c..6e1a83e 100644
--- a/patches/openstack/cinder/storpool-clone-volume.patch
+++ b/patches/openstack/cinder/storpool-clone-volume.patch
@@ -1,27 +1,33 @@
-commit 5563b42f8ba435bc45e08399c77ac350156d9aa2
-Author: Peter Penchev <openstack-dev@storpool.com>
-Date: Wed Apr 20 15:47:39 2022 +0300
+From 78a748a4abd7aa1aa56256222754a1206001af12 Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Wed, 20 Apr 2022 15:47:39 +0300
+Subject: [PATCH 6/8] StorPool: create_cloned_volume() improvements
- StorPool: create_cloned_volume() improvements
-
- If the source and destination volumes are in the same StorPool template
- (as defined by either the volume type or the global config setting),
- forego the need to create the transient snapshot at all and use
- StorPool's "base this volume on that one" API call (which does the same
- thing internally, but much more efficiently and atomically).
-
- If the destination volume should be in a different StorPool template,
- then make sure that the transient snapshot is also in that template so
- that, if other volumes are cloned from the same source volume later,
- they can all use the same data underneath (the internal workings of
- StorPool will detect that all those snapshots are exactly the same and
- not duplicate any data in the destination template). This will avoid
- data duplication, sometimes with drastic results.
-
- Bump the minimum required version of the "storpool" third-party library
- for snapshotUpdate(template=...) support.
-
- Change-Id: Ib9bb76cf2e2f2b035b92e596b1ef185558b190d6
+If the source and destination volumes are in the same StorPool template
+(as defined by either the volume type or the global config setting),
+forego the need to create the transient snapshot at all and use
+StorPool's "base this volume on that one" API call (which does the same
+thing internally, but much more efficiently and atomically).
+
+If the destination volume should be in a different StorPool template,
+then make sure that the transient snapshot is also in that template so
+that, if other volumes are cloned from the same source volume later,
+they can all use the same data underneath (the internal workings of
+StorPool will detect that all those snapshots are exactly the same and
+not duplicate any data in the destination template). This will avoid
+data duplication, sometimes with drastic results.
+
+Bump the minimum required version of the "storpool" third-party library
+for snapshotUpdate(template=...) support.
+
+Change-Id: Ib9bb76cf2e2f2b035b92e596b1ef185558b190d6
+---
+ .../unit/volume/drivers/test_storpool.py | 152 +++++++++++++++---
+ cinder/volume/drivers/storpool.py | 87 ++++++++--
+ driver-requirements.txt | 2 +-
+ ...torpool-clone-better-dca90f40c9273de9.yaml | 6 +
+ 4 files changed, 211 insertions(+), 36 deletions(-)
+ create mode 100644 releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml
diff --git a/cinder/tests/unit/volume/drivers/test_storpool.py b/cinder/tests/unit/volume/drivers/test_storpool.py
index 843283db4..51db7f292 100644
@@ -282,7 +288,7 @@
self.driver.create_volume({'id': 'cfgtempl2', 'name': 'v1', 'size': 1,
diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
-index 58ceed1be..36ad98ded 100644
+index d76957c88..696c86550 100644
--- a/cinder/volume/drivers/storpool.py
+++ b/cinder/volume/drivers/storpool.py
@@ -1,4 +1,4 @@
@@ -305,15 +311,15 @@
from cinder import exception
from cinder.i18n import _
from cinder import interface
-@@ -94,6 +96,7 @@ class StorPoolDriver(driver.VolumeDriver):
- 2.0.0 - Drop _attach_volume() and _detach_volume(), our os-brick
- connector will handle this.
+@@ -96,6 +98,7 @@ class StorPoolDriver(driver.VolumeDriver):
+ - Detach temporary snapshots and volumes after copying data
+ to or from Glance images.
- Drop backup_volume()
+ - Avoid data duplication in create_cloned_volume()
"""
VERSION = '2.0.0'
-@@ -200,30 +203,80 @@ class StorPoolDriver(driver.VolumeDriver):
+@@ -202,30 +205,80 @@ class StorPoolDriver(driver.VolumeDriver):
def create_cloned_volume(self, volume, src_vref):
refname = self._attach.volumeName(src_vref['id'])
@@ -423,3 +429,18 @@
storpool.spopenstack>=2.2.1 # Apache-2.0
# Datera
+diff --git a/releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml b/releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml
+new file mode 100644
+index 000000000..180427d9e
+--- /dev/null
++++ b/releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml
+@@ -0,0 +1,6 @@
++---
++features:
++ - |
++ StorPool driver: improved the way volumes are cloned into different
++ StorPool templates (exposed as Cinder storage pools) if requested,
++ eliminating some data duplication in the underlying StorPool cluster.
+--
+2.35.1
+
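The strategy that the patch implements in create_cloned_volume() can be
summarized as follows; this is a sketch with hypothetical names, not the
driver's actual API calls:

    def plan_clone(src_template, dst_template):
        """Outline of the StorPool cloning strategy described above."""
        if src_template == dst_template:
            # Same template: base the new volume directly on the source,
            # no transient snapshot needed.
            return ['base new volume on source volume']
        # Different template: keep the transient snapshot, but move it
        # into the destination template so that later clones of the same
        # source share its data instead of duplicating it.
        return ['create transient snapshot of source',
                'retag snapshot into destination template',
                'base new volume on snapshot']

    assert len(plan_clone('ssd', 'hdd')) == 3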
diff --git a/patches/openstack/cinder/storpool-rm-backup.patch b/patches/openstack/cinder/storpool-rm-backup.patch
index 0c8481a..1f6e52c 100644
--- a/patches/openstack/cinder/storpool-rm-backup.patch
+++ b/patches/openstack/cinder/storpool-rm-backup.patch
@@ -1,20 +1,28 @@
-From 416b774246fd1c1334bf2e46bbdc6fc644dda4d7 Mon Sep 17 00:00:00 2001
+From 7244e7f90e414a5853959877722f8b35461f5549 Mon Sep 17 00:00:00 2001
From: Peter Penchev <openstack-dev@storpool.com>
Date: Mon, 11 May 2020 11:02:53 +0300
-Subject: [PATCH 1/2] StorPool driver: remove the obsolete backup_volume().
+Subject: [PATCH 5/8] StorPool driver: remove the obsolete backup_volume()
Follow suit with I984de3df803f12dbb95e3309e668b3fbd519e70f.
Change-Id: Ia172452fd7c96dccfe54789d868fcf7b748322a3
---
- cinder/volume/drivers/storpool.py | 29 -----------------------------
- 1 file changed, 29 deletions(-)
+ cinder/volume/drivers/storpool.py | 30 +-----------------------------
+ 1 file changed, 1 insertion(+), 29 deletions(-)
diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
-index a1388677b..0d2903684 100644
+index 56b4070bc..d76957c88 100644
--- a/cinder/volume/drivers/storpool.py
+++ b/cinder/volume/drivers/storpool.py
-@@ -315,35 +315,6 @@ class StorPoolDriver(driver.VolumeDriver):
+@@ -95,6 +95,7 @@ class StorPoolDriver(driver.VolumeDriver):
+ connector will handle this.
+ - Detach temporary snapshots and volumes after copying data
+ to or from Glance images.
++ - Drop backup_volume()
+ """
+
+ VERSION = '2.0.0'
+@@ -318,35 +319,6 @@ class StorPoolDriver(driver.VolumeDriver):
'pools': pools
}
@@ -51,5 +59,5 @@
req_id = context.request_id
volname = self._attach.volumeName(volume['id'])
--
-2.33.0
+2.35.1
diff --git a/patches/series.experimental b/patches/series.experimental
index 27fa64f..ac285e2 100644
--- a/patches/series.experimental
+++ b/patches/series.experimental
@@ -1,6 +1,9 @@
+openstack/cinder/glance-upload-uri.patch
+openstack/cinder/clone-across-pools.patch
openstack/cinder/leave-it-to-brick.patch
+openstack/cinder/storpool-do-detach.patch
openstack/cinder/storpool-rm-backup.patch
openstack/cinder/storpool-clone-volume.patch
-openstack/cinder/storpool-clone-image.patch
+openstack/cinder/storpool-clone-across.patch
openstack/cinderlib/storpool-test-20190910.patch
openstack/devstack/eatmydata.patch