Reshuffle the patches, drop copy-volume-to-image and copy-image-to-volume
The patches will be submitted upstream in a slightly different manner,
so reflect that in the CI runs.
Change-Id: I114cbb2fb8f15beb908f6ce574ef50d12f43d3d7
diff --git a/patches/openstack/cinder/sep-clone-across-pools.patch b/patches/openstack/cinder/sep-clone-across-pools.patch
new file mode 100644
index 0000000..40753e7
--- /dev/null
+++ b/patches/openstack/cinder/sep-clone-across-pools.patch
@@ -0,0 +1,371 @@
+From 6809a2569d281eb6217edc3b7ce9663a996186fb Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Wed, 22 Jun 2022 10:04:31 +0300
+Subject: [PATCH 2/7] Add the clone_across_pools driver capability
+
+Let drivers declare that they can clone a volume into a different pool and
+relax the checks when creating a volume from an image (both with and
+without the image cache).
+
+Change-Id: Idbac4dae00f9fa03fb14547a204666721bf67d96
+Implements: blueprint clone-across-pools
+---
+ cinder/image/cache.py | 11 ++-
+ cinder/tests/unit/image/test_cache.py | 14 ++--
+ .../volume/flows/test_create_volume_flow.py | 73 ++++++++++++++++++
+ cinder/volume/flows/manager/create_volume.py | 8 +-
+ cinder/volume/manager.py | 3 +-
+ doc/source/reference/support-matrix.ini | 76 +++++++++++++++++++
+ 6 files changed, 172 insertions(+), 13 deletions(-)
+
+diff --git a/cinder/image/cache.py b/cinder/image/cache.py
+index 6d30210a4..a79451bf2 100644
+--- a/cinder/image/cache.py
++++ b/cinder/image/cache.py
+@@ -34,11 +34,13 @@ class ImageVolumeCache(object):
+ db,
+ volume_api,
+ max_cache_size_gb: int = 0,
+- max_cache_size_count: int = 0):
++ max_cache_size_count: int = 0,
++ clone_across_pools: bool = False):
+ self.db = db
+ self.volume_api = volume_api
+ self.max_cache_size_gb = int(max_cache_size_gb)
+ self.max_cache_size_count = int(max_cache_size_count)
++ self.clone_across_pools = bool(clone_across_pools)
+ self.notifier = rpc.get_notifier('volume', CONF.host)
+
+ def get_by_image_volume(self,
+@@ -55,11 +57,12 @@ class ImageVolumeCache(object):
+ self._notify_cache_eviction(context, cache_entry['image_id'],
+ cache_entry['host'])
+
+- @staticmethod
+- def _get_query_filters(volume_ref: objects.Volume) -> dict:
++ def _get_query_filters(self, volume_ref: objects.Volume) -> dict:
+ if volume_ref.is_clustered:
+ return {'cluster_name': volume_ref.cluster_name}
+- return {'host': volume_ref.host}
++ if not self.clone_across_pools:
++ return {'host': volume_ref.host}
++ return {}
+
+ def get_entry(self,
+ context: context.RequestContext,
+diff --git a/cinder/tests/unit/image/test_cache.py b/cinder/tests/unit/image/test_cache.py
+index c3aba9b28..b8b704e0d 100644
+--- a/cinder/tests/unit/image/test_cache.py
++++ b/cinder/tests/unit/image/test_cache.py
+@@ -42,11 +42,12 @@ class ImageVolumeCacheTestCase(test.TestCase):
+ self.volume.update(vol_params)
+ self.volume_ovo = objects.Volume(self.context, **vol_params)
+
+- def _build_cache(self, max_gb=0, max_count=0):
++ def _build_cache(self, max_gb=0, max_count=0, clone_across_pools=False):
+ cache = image_cache.ImageVolumeCache(self.mock_db,
+ self.mock_volume_api,
+ max_gb,
+- max_count)
++ max_count,
++ clone_across_pools)
+ cache.notifier = self.notifier
+ return cache
+
+@@ -91,9 +92,10 @@ class ImageVolumeCacheTestCase(test.TestCase):
+ self.assertEqual(entry['image_id'], msg['payload']['image_id'])
+ self.assertEqual(1, len(self.notifier.notifications))
+
+- @ddt.data(True, False)
+- def test_get_entry(self, clustered):
+- cache = self._build_cache()
++ @ddt.data((True, True), (True, False), (False, True), (False, False))
++ @ddt.unpack
++ def test_get_entry(self, clustered, clone_across_pools):
++ cache = self._build_cache(clone_across_pools=clone_across_pools)
+ entry = self._build_entry()
+ image_meta = {
+ 'is_public': True,
+@@ -107,7 +109,7 @@ class ImageVolumeCacheTestCase(test.TestCase):
+ image_volume_cache_get_and_update_last_used.return_value) = entry
+ if not clustered:
+ self.volume_ovo.cluster_name = None
+- expect = {'host': self.volume.host}
++ expect = {} if clone_across_pools else {'host': self.volume.host}
+ else:
+ expect = {'cluster_name': self.volume.cluster_name}
+ found_entry = cache.get_entry(self.context,
+diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+index 5b4ddb35f..83880a9f9 100644
+--- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py
++++ b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+@@ -1060,6 +1060,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ self, volume_get_by_id, vol_update, rekey_vol, cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1085,6 +1086,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ handle_bootable, cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1110,6 +1112,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ mock_cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1146,6 +1149,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ mock_cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_cache = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+@@ -1194,6 +1198,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ mock_cleanup_cg):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1252,6 +1257,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ driver_error):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ backup_host = 'host@backend#pool'
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+@@ -1291,6 +1297,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+ def test_create_drive_error(self, mock_message_create):
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_volume_manager = mock.MagicMock()
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ fake_volume_manager, fake_db, fake_driver)
+@@ -1492,6 +1499,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+ spec=utils.get_file_spec())
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ mock.MagicMock(), fake_db, fake_driver)
+ fake_image_service = fake_image.FakeImageService()
+@@ -1518,6 +1526,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+ 'cinder_encryption_key_id': None}
+
+ fake_driver.clone_image.return_value = (None, False)
++ fake_db.volume_get_all.return_value = []
+ fake_db.volume_get_all_by_host.return_value = [image_volume]
+
+ fake_manager._create_from_image(self.ctxt,
+@@ -1536,6 +1545,69 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+ self.assertFalse(fake_driver.create_cloned_volume.called)
+ mock_cleanup_cg.assert_called_once_with(volume)
+
++ @mock.patch('cinder.volume.flows.manager.create_volume.'
++ 'CreateVolumeFromSpecTask.'
++ '_cleanup_cg_in_volume')
++ @mock.patch('cinder.image.image_utils.TemporaryImages.fetch')
++ @mock.patch('cinder.volume.flows.manager.create_volume.'
++ 'CreateVolumeFromSpecTask.'
++ '_handle_bootable_volume_glance_meta')
++ @mock.patch('cinder.image.image_utils.qemu_img_info')
++ def test_create_from_image_across(self, mock_qemu_info, handle_bootable,
++ mock_fetch_img, mock_cleanup_cg,
++ format='raw', owner=None,
++ location=True):
++ self.flags(allowed_direct_url_schemes=['cinder'])
++ mock_fetch_img.return_value = mock.MagicMock(
++ spec=utils.get_file_spec())
++ fake_db = mock.MagicMock()
++ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {'clone_across_pools': True}
++ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
++ mock.MagicMock(), fake_db, fake_driver)
++ fake_image_service = fake_image.FakeImageService()
++
++ volume = fake_volume.fake_volume_obj(self.ctxt,
++ host='host@backend#pool')
++ image_volume = fake_volume.fake_volume_obj(self.ctxt,
++ volume_metadata={})
++ image_id = fakes.IMAGE_ID
++ image_info = imageutils.QemuImgInfo()
++ image_info.virtual_size = '1073741824'
++ mock_qemu_info.return_value = image_info
++
++ url = 'cinder://%s' % image_volume['id']
++ image_location = None
++ if location:
++ image_location = (url, [{'url': url, 'metadata': {}}])
++ image_meta = {'id': image_id,
++ 'container_format': 'bare',
++ 'disk_format': format,
++ 'size': 1024,
++ 'owner': owner or self.ctxt.project_id,
++ 'virtual_size': None,
++ 'cinder_encryption_key_id': None}
++
++ fake_driver.clone_image.return_value = (None, False)
++ fake_db.volume_get_all.return_value = [image_volume]
++ fake_db.volume_get_all_by_host.return_value = []
++
++ fake_manager._create_from_image(self.ctxt,
++ volume,
++ image_location,
++ image_id,
++ image_meta,
++ fake_image_service)
++ if format == 'raw' and not owner and location:
++ fake_driver.create_cloned_volume.assert_called_once_with(
++ volume, image_volume)
++ handle_bootable.assert_called_once_with(self.ctxt, volume,
++ image_id=image_id,
++ image_meta=image_meta)
++ else:
++ self.assertFalse(fake_driver.create_cloned_volume.called)
++ mock_cleanup_cg.assert_called_once_with(volume)
++
+ LEGACY_URI = 'cinder://%s' % fakes.VOLUME_ID
+ MULTISTORE_URI = 'cinder://fake-store/%s' % fakes.VOLUME_ID
+
+@@ -1562,6 +1634,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+ spec=utils.get_file_spec())
+ fake_db = mock.MagicMock()
+ fake_driver = mock.MagicMock()
++ fake_driver.capabilities = {}
+ fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+ mock.MagicMock(), fake_db, fake_driver)
+ fake_image_service = fake_image.FakeImageService()
+diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
+index 0ae3cb59d..45d06ebb9 100644
+--- a/cinder/volume/flows/manager/create_volume.py
++++ b/cinder/volume/flows/manager/create_volume.py
+@@ -741,8 +741,12 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
+ urls = list(set([direct_url]
+ + [loc.get('url') for loc in locations or []]))
+ image_volume_ids = self._extract_cinder_ids(urls)
+- image_volumes = self.db.volume_get_all_by_host(
+- context, volume['host'], filters={'id': image_volume_ids})
++ if self.driver.capabilities.get('clone_across_pools'):
++ image_volumes = self.db.volume_get_all(
++ context, filters={'id': image_volume_ids})
++ else:
++ image_volumes = self.db.volume_get_all_by_host(
++ context, volume['host'], filters={'id': image_volume_ids})
+
+ for image_volume in image_volumes:
+ # For the case image volume is stored in the service tenant,
+diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
+index 2dcf7ae30..79a0ae9a2 100644
+--- a/cinder/volume/manager.py
++++ b/cinder/volume/manager.py
+@@ -356,7 +356,8 @@ class VolumeManager(manager.CleanableManager,
+ self.db,
+ cinder_volume.API(),
+ max_cache_size,
+- max_cache_entries
++ max_cache_entries,
++ self.driver.capabilities.get('clone_across_pools', False)
+ )
+ LOG.info('Image-volume cache enabled for host %(host)s.',
+ {'host': self.host})
+diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
+index 384a633da..d01ecc15c 100644
+--- a/doc/source/reference/support-matrix.ini
++++ b/doc/source/reference/support-matrix.ini
+@@ -996,3 +996,79 @@ driver.win_iscsi=missing
+ driver.win_smb=missing
+ driver.yadro=complete
+ driver.zadara=missing
++
++[operation.clone_across_pools]
++title=Clone a volume into a different pool
++status=optional
++notes=Vendor drivers that support cloning a volume into a different
++ storage pool, e.g. when creating a volume from a Cinder-backed
++ Glance image.
++driver.datacore=missing
++driver.datera=missing
++driver.dell_emc_powermax=missing
++driver.dell_emc_powerstore=missing
++driver.dell_emc_powerstore_nfs=missing
++driver.dell_emc_powervault=missing
++driver.dell_emc_sc=missing
++driver.dell_emc_unity=missing
++driver.dell_emc_vmax_af=missing
++driver.dell_emc_vmax_3=missing
++driver.dell_emc_vnx=missing
++driver.dell_emc_powerflex=missing
++driver.dell_emc_xtremio=missing
++driver.fujitsu_eternus=missing
++driver.hitachi_vsp=missing
++driver.hpe_3par=missing
++driver.hpe_msa=missing
++driver.hpe_nimble=missing
++driver.huawei_t_v1=missing
++driver.huawei_t_v2=missing
++driver.huawei_v3=missing
++driver.huawei_f_v3=missing
++driver.huawei_f_v5=missing
++driver.huawei_v5=missing
++driver.huawei_18000=missing
++driver.huawei_dorado=missing
++driver.huawei_fusionstorage=missing
++driver.infinidat=missing
++driver.ibm_ds8k=missing
++driver.ibm_flashsystem=missing
++driver.ibm_gpfs=missing
++driver.ibm_storwize=missing
++driver.ibm_xiv=missing
++driver.infortrend=missing
++driver.inspur=missing
++driver.inspur_as13000=missing
++driver.kaminario=missing
++driver.kioxia_kumoscale=missing
++driver.lenovo=missing
++driver.lightbits_lightos=missing
++driver.linbit_linstor=missing
++driver.lvm=missing
++driver.macrosan=missing
++driver.nec=missing
++driver.nec_v=missing
++driver.netapp_ontap=missing
++driver.netapp_solidfire=missing
++driver.nexenta=missing
++driver.nfs=missing
++driver.opene_joviandss=missing
++driver.prophetstor=missing
++driver.pure=missing
++driver.qnap=missing
++driver.quobyte=missing
++driver.rbd=missing
++driver.rbd_iscsi=missing
++driver.sandstone=missing
++driver.seagate=missing
++driver.storpool=missing
++driver.synology=missing
++driver.toyou_netstor=missing
++driver.vrtsaccess=missing
++driver.vrtscnfs=missing
++driver.vzstorage=missing
++driver.vmware=missing
++driver.win_iscsi=missing
++driver.win_smb=missing
++driver.yadro=missing
++driver.zadara=missing
+--
+2.35.1
+
diff --git a/patches/openstack/cinder/sep-glance-upload-uri.patch b/patches/openstack/cinder/sep-glance-upload-uri.patch
new file mode 100644
index 0000000..2363ced
--- /dev/null
+++ b/patches/openstack/cinder/sep-glance-upload-uri.patch
@@ -0,0 +1,243 @@
+From 46facc035f5fa0cfba08a6493f08e05be2023d40 Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Thu, 9 Jun 2022 01:43:28 +0300
+Subject: [PATCH 1/7] Send the correct location URI to the Glance v2 API
+
+When uploading a volume to an image, send the new-style
+cinder://<store-id>/<volume-id> URL to the Glance API if
+image_service:store_id is present in the volume type extra specs.
+
+Closes-Bug: #1978020
+Co-Authored-By: Rajat Dhasmana <rajatdhasmana@gmail.com>
+Change-Id: I815706f691a7d1e5a0c54eb15222417008ef1f34
+---
+ cinder/tests/unit/volume/test_image.py | 2 +-
+ .../tests/unit/volume/test_volume_manager.py | 160 ++++++++++++++++++
+ cinder/volume/manager.py | 4 +-
+ ...20-glance-upload-uri-8fbc70c442ac620c.yaml | 11 ++
+ 4 files changed, 175 insertions(+), 2 deletions(-)
+ create mode 100644 releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml
+
+diff --git a/cinder/tests/unit/volume/test_image.py b/cinder/tests/unit/volume/test_image.py
+index ebc7904c4..2b1b9c392 100644
+--- a/cinder/tests/unit/volume/test_image.py
++++ b/cinder/tests/unit/volume/test_image.py
+@@ -366,7 +366,7 @@ class CopyVolumeToImageTestCase(base.BaseVolumeTestCase):
+ def test_copy_volume_to_image_with_image_volume(self):
+ image = self._test_copy_volume_to_image_with_image_volume()
+ self.assertTrue(image['locations'][0]['url'].startswith('cinder://'))
+- image_volume_id = image['locations'][0]['url'][9:]
++ image_volume_id = image['locations'][0]['url'].split('/')[-1]
+ # The image volume does NOT include the snapshot_id, and include the
+ # source_volid which is the uploaded-volume id.
+ vol_ref = db.volume_get(self.context, image_volume_id)
+diff --git a/cinder/tests/unit/volume/test_volume_manager.py b/cinder/tests/unit/volume/test_volume_manager.py
+index 341e7326f..9a7a50ff6 100644
+--- a/cinder/tests/unit/volume/test_volume_manager.py
++++ b/cinder/tests/unit/volume/test_volume_manager.py
+@@ -342,3 +342,163 @@ class VolumeManagerTestCase(base.BaseVolumeTestCase):
+
+ res = manager._driver_shares_targets()
+ self.assertFalse(res)
++
++ @mock.patch('cinder.message.api.API.create')
++ @mock.patch('cinder.volume.volume_utils.require_driver_initialized')
++ @mock.patch('cinder.volume.manager.VolumeManager._clone_image_volume')
++ @mock.patch('cinder.db.volume_metadata_update')
++ def test_clone_image_no_volume(self,
++ fake_update,
++ fake_clone,
++ fake_msg_create,
++ fake_init):
++ """Make sure nothing happens if no volume was created."""
++ manager = vol_manager.VolumeManager()
++
++ ctx = mock.sentinel.context
++ volume = fake_volume.fake_volume_obj(ctx)
++ image_service = mock.MagicMock(spec=[])
++
++ fake_clone.return_value = None
++
++ image_meta = {'disk_format': 'raw', 'container_format': 'ova'}
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_not_called()
++ fake_update.assert_not_called()
++
++ image_meta = {'disk_format': 'qcow2', 'container_format': 'bare'}
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_not_called()
++ fake_update.assert_not_called()
++
++ image_meta = {'disk_format': 'raw', 'container_format': 'bare'}
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_called_once_with(ctx, volume, image_meta)
++ fake_update.assert_not_called()
++
++ @mock.patch('cinder.message.api.API.create')
++ @mock.patch('cinder.objects.VolumeType.get_by_id')
++ @mock.patch('cinder.volume.volume_utils.require_driver_initialized')
++ @mock.patch('cinder.volume.manager.VolumeManager._clone_image_volume')
++ @mock.patch('cinder.db.volume_metadata_update')
++ def test_clone_image_no_store_id(self,
++ fake_update,
++ fake_clone,
++ fake_msg_create,
++ fake_volume_type_get,
++ fake_init):
++ """Send a cinder://<volume-id> URL if no store ID in extra specs."""
++ manager = vol_manager.VolumeManager()
++
++ project_id = fake.PROJECT_ID
++
++ ctx = mock.MagicMock()
++ ctx.elevated.return_value = ctx
++ ctx.project_id = project_id
++
++ vol_type = fake_volume.fake_volume_type_obj(
++ ctx,
++ id=fake.VOLUME_TYPE_ID,
++ name=fake.VOLUME_TYPE_NAME,
++ extra_specs={'volume_type_backend': 'unknown'})
++ fake_volume_type_get.return_value = vol_type
++
++ volume = fake_volume.fake_volume_obj(ctx,
++ id=fake.VOLUME_ID,
++ volume_type_id=vol_type.id)
++
++ image_volume_id = fake.VOLUME2_ID
++ image_volume = fake_volume.fake_volume_obj(ctx, id=image_volume_id)
++ url = 'cinder://%(vol)s' % {'vol': image_volume_id}
++
++ image_service = mock.MagicMock(spec=['add_location'])
++ image_meta_id = fake.IMAGE_ID
++ image_meta = {
++ 'id': image_meta_id,
++ 'disk_format': 'raw',
++ 'container_format': 'bare',
++ }
++ image_volume_meta = {
++ 'image_owner': project_id,
++ 'glance_image_id': image_meta_id,
++ }
++
++ fake_clone.return_value = image_volume
++
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_called_once_with(ctx, volume, image_meta)
++ fake_update.assert_called_with(ctx, image_volume_id,
++ image_volume_meta, False)
++ image_service.add_location.assert_called_once_with(ctx, image_meta_id,
++ url, {})
++
++ @mock.patch('cinder.message.api.API.create')
++ @mock.patch('cinder.objects.VolumeType.get_by_id')
++ @mock.patch('cinder.volume.volume_utils.require_driver_initialized')
++ @mock.patch('cinder.volume.manager.VolumeManager._clone_image_volume')
++ @mock.patch('cinder.db.volume_metadata_update')
++ def test_clone_image_with_store_id(self,
++ fake_update,
++ fake_clone,
++ fake_msg_create,
++ fake_volume_type_get,
++ fake_init):
++ """Send a cinder://<store-id>/<volume-id> URL."""
++ manager = vol_manager.VolumeManager()
++
++ project_id = fake.PROJECT_ID
++
++ ctx = mock.MagicMock()
++ ctx.elevated.return_value = ctx
++ ctx.project_id = project_id
++
++ store_id = 'muninn'
++ vol_type = fake_volume.fake_volume_type_obj(
++ ctx,
++ id=fake.VOLUME_TYPE_ID,
++ name=fake.VOLUME_TYPE_NAME,
++ extra_specs={
++ 'volume_type_backend': 'unknown',
++ 'image_service:store_id': store_id,
++ })
++ fake_volume_type_get.return_value = vol_type
++
++ volume = fake_volume.fake_volume_obj(ctx,
++ id=fake.VOLUME_ID,
++ volume_type_id=vol_type.id)
++
++ image_volume_id = '42'
++ image_volume = mock.MagicMock(spec=['id'])
++ image_volume.id = image_volume_id
++ url = 'cinder://%(store)s/%(vol)s' % {
++ 'store': store_id,
++ 'vol': image_volume_id,
++ }
++
++ image_service = mock.MagicMock(spec=['add_location'])
++ image_meta_id = fake.IMAGE_ID
++ image_meta = {
++ 'id': image_meta_id,
++ 'disk_format': 'raw',
++ 'container_format': 'bare',
++ }
++ image_volume_meta = {
++ 'image_owner': project_id,
++ 'glance_image_id': image_meta_id,
++ }
++
++ fake_clone.return_value = image_volume
++
++ manager._clone_image_volume_and_add_location(ctx, volume,
++ image_service, image_meta)
++ fake_clone.assert_called_once_with(ctx, volume, image_meta)
++ fake_update.assert_called_with(ctx, image_volume_id,
++ image_volume_meta, False)
++ image_service.add_location.assert_called_once_with(ctx,
++ image_meta_id,
++ url,
++ {'store': store_id})
+diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
+index 57ac77931..2dcf7ae30 100644
+--- a/cinder/volume/manager.py
++++ b/cinder/volume/manager.py
+@@ -1723,7 +1723,6 @@ class VolumeManager(manager.CleanableManager,
+ image_volume_meta,
+ False)
+
+- uri = 'cinder://%s' % image_volume.id
+ image_registered = None
+
+ # retrieve store information from extra-specs
+@@ -1732,6 +1731,9 @@ class VolumeManager(manager.CleanableManager,
+
+ if store_id:
+ location_metadata['store'] = store_id
++ uri = 'cinder://%s/%s' % (store_id, image_volume.id)
++ else:
++ uri = 'cinder://%s' % image_volume.id
+
+ try:
+ image_registered = image_service.add_location(
+diff --git a/releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml b/releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml
+new file mode 100644
+index 000000000..04a909d3d
+--- /dev/null
++++ b/releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml
+@@ -0,0 +1,11 @@
++---
++fixes:
++ - |
++ `Bug #1978020 <https://bugs.launchpad.net/cinder/+bug/1978020>`_: Fixed
++ uploading a volume to a Cinder-backed Glance image; if a store name is set
++ in the volume type's extra specs, it must also be sent to Glance as part of
++ the new image location URI.
++ Please note that while the `image_service:store_id` extra spec is
++ validated when it is set for the volume type, it is not validated later;
++ it is the operator's responsibility to make sure that the Glance store is
++    not renamed or removed, or that the volume types are updated accordingly.
+--
+2.35.1
+
diff --git a/patches/openstack/cinder/sep-sp-clone-across-pools.patch b/patches/openstack/cinder/sep-sp-clone-across-pools.patch
new file mode 100644
index 0000000..ba8a79c
--- /dev/null
+++ b/patches/openstack/cinder/sep-sp-clone-across-pools.patch
@@ -0,0 +1,39 @@
+From 5851b3cbf356620b43e82afcd1b2c55ed1a9cb0a Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Wed, 22 Jun 2022 10:48:25 +0300
+Subject: [PATCH 3/7] StorPool: declare the clone_across_pools capability
+
+Change-Id: I5338c6c4f53a448e495f695cd64b36b722cd947d
+---
+ cinder/volume/drivers/storpool.py | 1 +
+ doc/source/reference/support-matrix.ini | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
+index 47685cb3f..328f76c00 100644
+--- a/cinder/volume/drivers/storpool.py
++++ b/cinder/volume/drivers/storpool.py
+@@ -308,6 +308,7 @@ class StorPoolDriver(driver.VolumeDriver):
+ 'driver_version': self.VERSION,
+ 'storage_protocol': constants.STORPOOL,
+
++ 'clone_across_pools': True,
+ 'sparse_copy_volume': True,
+
+ 'pools': pools
+diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
+index d01ecc15c..33c9b0b91 100644
+--- a/doc/source/reference/support-matrix.ini
++++ b/doc/source/reference/support-matrix.ini
+@@ -1061,7 +1061,7 @@ driver.rbd=missing
+ driver.rbd_iscsi=missing
+ driver.sandstone=missing
+ driver.seagate=missing
+-driver.storpool=missing
++driver.storpool=complete
+ driver.synology=missing
+ driver.toyou_netstor=missing
+ driver.vrtsaccess=missing
+--
+2.35.1
+
diff --git a/patches/openstack/cinder/sep-sp-clone-volume.patch b/patches/openstack/cinder/sep-sp-clone-volume.patch
new file mode 100644
index 0000000..abe5bc5
--- /dev/null
+++ b/patches/openstack/cinder/sep-sp-clone-volume.patch
@@ -0,0 +1,426 @@
+From 9565e7e32bee1e613f1e09a8989e1bad7f90fa08 Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Wed, 20 Apr 2022 15:47:39 +0300
+Subject: [PATCH 7/7] StorPool: create_cloned_volume() improvements
+
+If the source and destination volumes are in the same StorPool template
+(as defined by either the volume type or the global config setting),
+forgo the need to create the transient snapshot at all and use
+StorPool's "base this volume on that one" API call (which does the same
+thing internally, but much more efficiently and atomically).
+
+If the destination volume should be in a different StorPool template,
+then make sure that the transient snapshot is also in that template so
+that, if other volumes are cloned from the same source volume later,
+they can all use the same data underneath (the internal workings of
+StorPool will detect that all those snapshots are exactly the same and
+not duplicate any data in the destination template). This will avoid
+data duplication, sometimes saving a significant amount of space.
+
+Bump the minimum required version of the "storpool" third-party library
+for snapshotUpdate(template=...) support.
+
+Change-Id: Ib9bb76cf2e2f2b035b92e596b1ef185558b190d6
+---
+ .../unit/volume/drivers/test_storpool.py | 150 ++++++++++++++++--
+ cinder/volume/drivers/storpool.py | 84 ++++++++--
+ driver-requirements.txt | 2 +-
+ ...torpool-clone-better-dca90f40c9273de9.yaml | 6 +
+ 4 files changed, 208 insertions(+), 34 deletions(-)
+ create mode 100644 releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml
+
+diff --git a/cinder/tests/unit/volume/drivers/test_storpool.py b/cinder/tests/unit/volume/drivers/test_storpool.py
+index 2e6f56526..65d4ed304 100644
+--- a/cinder/tests/unit/volume/drivers/test_storpool.py
++++ b/cinder/tests/unit/volume/drivers/test_storpool.py
+@@ -14,6 +14,7 @@
+ # under the License.
+
+
++import itertools
+ import re
+ import sys
+ from unittest import mock
+@@ -110,13 +111,33 @@ class MockAPI(object):
+ def snapshotCreate(self, vname, snap):
+ snapshots[snap['name']] = dict(volumes[vname])
+
++ def snapshotUpdate(self, snap, data):
++ sdata = snapshots[snap]
++ sdata.update(data)
++
+ def snapshotDelete(self, name):
+ del snapshots[name]
+
+ def volumeCreate(self, v):
+- if v['name'] in volumes:
++ name = v['name']
++ if name in volumes:
+ raise MockApiError('volume already exists')
+- volumes[v['name']] = v
++ data = dict(v)
++
++ if 'parent' in v and 'template' not in v:
++ sdata = snapshots[v['parent']]
++ if 'template' in sdata:
++ data['template'] = sdata['template']
++
++ if 'baseOn' in v and 'template' not in v:
++ vdata = volumes[v['baseOn']]
++ if 'template' in vdata:
++ data['template'] = vdata['template']
++
++ if 'template' not in data:
++ data['template'] = None
++
++ volumes[name] = data
+
+ def volumeDelete(self, name):
+ del volumes[name]
+@@ -171,6 +192,23 @@ fakeStorPool.spopenstack.AttachDB = MockAttachDB
+ fakeStorPool.sptypes.VolumeUpdateDesc = MockVolumeUpdateDesc
+
+
++class MockVolumeDB(object):
++ """Simulate a Cinder database with a volume_get() method."""
++
++ def __init__(self, vol_types=None):
++ """Store the specified volume types mapping if necessary."""
++ self.vol_types = vol_types if vol_types is not None else {}
++
++ def volume_get(self, _context, vid):
++ """Get a volume-like structure, only the fields we care about."""
++ # Still, try to at least make sure we know about that volume
++ return {
++ 'id': vid,
++ 'size': volumes[volumeName(vid)]['size'],
++ 'volume_type': self.vol_types.get(vid),
++ }
++
++
+ @ddt.ddt
+ class StorPoolTestCase(test.TestCase):
+
+@@ -260,6 +298,11 @@ class StorPoolTestCase(test.TestCase):
+ self.assertListEqual(sorted([volumeName(n) for n in names]),
+ sorted(volumes.keys()))
+
++ def assertSnapshotNames(self, specs):
++ self.assertListEqual(
++ sorted(snapshotName(spec[0], spec[1]) for spec in specs),
++ sorted(snapshots.keys()))
++
+ @mock_volume_types
+ def test_create_delete_volume(self):
+ self.assertVolumeNames([])
+@@ -272,7 +315,7 @@ class StorPoolTestCase(test.TestCase):
+ self.assertVolumeNames(('1',))
+ v = volumes[volumeName('1')]
+ self.assertEqual(1 * units.Gi, v['size'])
+- self.assertNotIn('template', v.keys())
++ self.assertIsNone(v['template'])
+ self.assertEqual(3, v['replication'])
+
+ caught = False
+@@ -292,7 +335,7 @@ class StorPoolTestCase(test.TestCase):
+ self.assertVolumeNames(('1',))
+ v = volumes[volumeName('1')]
+ self.assertEqual(2 * units.Gi, v['size'])
+- self.assertNotIn('template', v.keys())
++ self.assertIsNone(v['template'])
+ self.assertEqual(3, v['replication'])
+
+ self.driver.create_volume({'id': '2', 'name': 'v2', 'size': 3,
+@@ -300,7 +343,7 @@ class StorPoolTestCase(test.TestCase):
+ self.assertVolumeNames(('1', '2'))
+ v = volumes[volumeName('2')]
+ self.assertEqual(3 * units.Gi, v['size'])
+- self.assertNotIn('template', v.keys())
++ self.assertIsNone(v['template'])
+ self.assertEqual(3, v['replication'])
+
+ self.driver.create_volume({'id': '3', 'name': 'v2', 'size': 4,
+@@ -322,7 +365,7 @@ class StorPoolTestCase(test.TestCase):
+ # Make sure the dictionary is not corrupted somehow...
+ v = volumes[volumeName('1')]
+ self.assertEqual(2 * units.Gi, v['size'])
+- self.assertNotIn('template', v.keys())
++ self.assertIsNone(v['template'])
+ self.assertEqual(3, v['replication'])
+
+ for vid in ('1', '2', '3', '4'):
+@@ -386,16 +429,17 @@ class StorPoolTestCase(test.TestCase):
+ self.driver.extend_volume({'id': '1'}, 2)
+ self.assertEqual(2 * units.Gi, volumes[volumeName('1')]['size'])
+
+- self.driver.create_cloned_volume({'id': '2', 'name': 'clo', 'size': 3},
+- {'id': 1})
++ with mock.patch.object(self.driver, 'db', new=MockVolumeDB()):
++ self.driver.create_cloned_volume(
++ {'id': '2', 'name': 'clo', 'size': 3, 'volume_type': None},
++ {'id': 1})
+ self.assertVolumeNames(('1', '2'))
+ self.assertDictEqual({}, snapshots)
+- # Note: this would not be true in a real environment (the snapshot will
+- # have been deleted, the volume would have no parent), but with this
+- # fake implementation it helps us make sure that the second volume was
+- # created with the proper options.
+- self.assertEqual(volumes[volumeName('2')]['parent'],
+- snapshotName('clone', '2'))
++ # We do not provide a StorPool template name in either of the volumes'
++ # types, so create_cloned_volume() should take the baseOn shortcut.
++ vol2 = volumes[volumeName('2')]
++ self.assertEqual(vol2['baseOn'], volumeName('1'))
++ self.assertNotIn('parent', vol2)
+
+ self.driver.delete_volume({'id': 1})
+ self.driver.delete_volume({'id': 2})
+@@ -403,6 +447,78 @@ class StorPoolTestCase(test.TestCase):
+ self.assertDictEqual({}, volumes)
+ self.assertDictEqual({}, snapshots)
+
++ @ddt.data(*itertools.product(
++ [None] + [{'id': key} for key in sorted(volume_types.keys())],
++ [None] + [{'id': key} for key in sorted(volume_types.keys())]))
++ @ddt.unpack
++ @mock_volume_types
++ def test_create_cloned_volume(self, src_type, dst_type):
++ self.assertDictEqual({}, volumes)
++ self.assertDictEqual({}, snapshots)
++
++ src_template = (
++ None
++ if src_type is None
++ else volume_types[src_type['id']].get('storpool_template')
++ )
++ dst_template = (
++ None
++ if dst_type is None
++ else volume_types[dst_type['id']].get('storpool_template')
++ )
++ src_name = 's-none' if src_template is None else 's-' + src_template
++ dst_name = 'd-none' if dst_template is None else 'd-' + dst_template
++
++ snap_name = snapshotName('clone', '2')
++
++ vdata1 = {
++ 'id': '1',
++ 'name': src_name,
++ 'size': 1,
++ 'volume_type': src_type,
++ }
++ self.assertEqual(
++ self.driver._template_from_volume(vdata1),
++ src_template)
++ self.driver.create_volume(vdata1)
++ self.assertVolumeNames(('1',))
++
++ vdata2 = {
++ 'id': 2,
++ 'name': dst_name,
++ 'size': 1,
++ 'volume_type': dst_type,
++ }
++ self.assertEqual(
++ self.driver._template_from_volume(vdata2),
++ dst_template)
++ with mock.patch.object(self.driver, 'db',
++ new=MockVolumeDB(vol_types={'1': src_type})):
++ self.driver.create_cloned_volume(vdata2, {'id': '1'})
++ self.assertVolumeNames(('1', '2'))
++ vol2 = volumes[volumeName('2')]
++ self.assertEqual(vol2['template'], dst_template)
++
++ if src_template == dst_template:
++ self.assertEqual(vol2['baseOn'], volumeName('1'))
++ self.assertNotIn('parent', vol2)
++
++ self.assertDictEqual({}, snapshots)
++ else:
++ self.assertNotIn('baseOn', vol2)
++ self.assertEqual(vol2['parent'], snap_name)
++
++ self.assertSnapshotNames((('clone', '2'),))
++ self.assertEqual(snapshots[snap_name]['template'], dst_template)
++
++ self.driver.delete_volume({'id': '1'})
++ self.driver.delete_volume({'id': '2'})
++ if src_template != dst_template:
++ del snapshots[snap_name]
++
++ self.assertDictEqual({}, volumes)
++ self.assertDictEqual({}, snapshots)
++
+ @mock_volume_types
+ def test_config_replication(self):
+ self.assertVolumeNames([])
+@@ -422,7 +538,7 @@ class StorPoolTestCase(test.TestCase):
+ self.assertVolumeNames(('cfgrepl1',))
+ v = volumes[volumeName('cfgrepl1')]
+ self.assertEqual(3, v['replication'])
+- self.assertNotIn('template', v)
++ self.assertIsNone(v['template'])
+ self.driver.delete_volume({'id': 'cfgrepl1'})
+
+ self.driver.configuration.storpool_replication = 2
+@@ -436,7 +552,7 @@ class StorPoolTestCase(test.TestCase):
+ self.assertVolumeNames(('cfgrepl2',))
+ v = volumes[volumeName('cfgrepl2')]
+ self.assertEqual(2, v['replication'])
+- self.assertNotIn('template', v)
++ self.assertIsNone(v['template'])
+ self.driver.delete_volume({'id': 'cfgrepl2'})
+
+ self.driver.create_volume({'id': 'cfgrepl3', 'name': 'v1', 'size': 1,
+@@ -468,7 +584,7 @@ class StorPoolTestCase(test.TestCase):
+ self.assertVolumeNames(('cfgtempl1',))
+ v = volumes[volumeName('cfgtempl1')]
+ self.assertEqual(3, v['replication'])
+- self.assertNotIn('template', v)
++ self.assertIsNone(v['template'])
+ self.driver.delete_volume({'id': 'cfgtempl1'})
+
+ self.driver.create_volume({'id': 'cfgtempl2', 'name': 'v1', 'size': 1,
+diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
+index cdaf0043d..418e5750f 100644
+--- a/cinder/volume/drivers/storpool.py
++++ b/cinder/volume/drivers/storpool.py
+@@ -19,11 +19,13 @@ import platform
+
+ from oslo_config import cfg
+ from oslo_log import log as logging
++from oslo_utils import excutils
+ from oslo_utils import importutils
+ from oslo_utils import units
+ import six
+
+ from cinder.common import constants
++from cinder import context
+ from cinder import exception
+ from cinder.i18n import _
+ from cinder import interface
+@@ -197,30 +199,80 @@ class StorPoolDriver(driver.VolumeDriver):
+
+ def create_cloned_volume(self, volume, src_vref):
+ refname = self._attach.volumeName(src_vref['id'])
++ size = int(volume['size']) * units.Gi
++ volname = self._attach.volumeName(volume['id'])
++
++ src_volume = self.db.volume_get(
++ context.get_admin_context(),
++ src_vref['id'],
++ )
++ src_template = self._template_from_volume(src_volume)
++
++ template = self._template_from_volume(volume)
++ LOG.debug('clone volume id %(vol_id)s template %(template)s', {
++ 'vol_id': repr(volume['id']),
++ 'template': repr(template),
++ })
++ if template == src_template:
++ LOG.info('Using baseOn to clone a volume into the same template')
++ try:
++ self._attach.api().volumeCreate({
++ 'name': volname,
++ 'size': size,
++ 'baseOn': refname,
++ })
++ except spapi.ApiError as e:
++ raise self._backendException(e)
++
++ return None
++
+ snapname = self._attach.snapshotName('clone', volume['id'])
++ LOG.info(
++ 'A transient snapshot for a %(src)s -> %(dst)s template change',
++ {'src': src_template, 'dst': template})
+ try:
+ self._attach.api().snapshotCreate(refname, {'name': snapname})
+ except spapi.ApiError as e:
+- raise self._backendException(e)
++ if e.name != 'objectExists':
++ raise self._backendException(e)
+
+- size = int(volume['size']) * units.Gi
+- volname = self._attach.volumeName(volume['id'])
+ try:
+- self._attach.api().volumeCreate({
+- 'name': volname,
+- 'size': size,
+- 'parent': snapname
+- })
+- except spapi.ApiError as e:
+- raise self._backendException(e)
+- finally:
+ try:
+- self._attach.api().snapshotDelete(snapname)
++ self._attach.api().snapshotUpdate(
++ snapname,
++ {'template': template},
++ )
+ except spapi.ApiError as e:
+- # ARGH!
+- LOG.error("Could not delete the temp snapshot %(name)s: "
+- "%(msg)s",
+- {'name': snapname, 'msg': e})
++ raise self._backendException(e)
++
++ try:
++ self._attach.api().volumeCreate({
++ 'name': volname,
++ 'size': size,
++ 'parent': snapname
++ })
++ except spapi.ApiError as e:
++ raise self._backendException(e)
++
++ try:
++ self._attach.api().snapshotUpdate(
++ snapname,
++ {'tags': {'transient': '1.0'}},
++ )
++ except spapi.ApiError as e:
++ raise self._backendException(e)
++ except Exception:
++ with excutils.save_and_reraise_exception():
++ try:
++ LOG.warning(
++ 'Something went wrong, removing the transient snapshot'
++ )
++ self._attach.api().snapshotDelete(snapname)
++ except spapi.ApiError as e:
++ LOG.error(
++ 'Could not delete the %(name)s snapshot: %(err)s',
++ {'name': snapname, 'err': str(e)}
++ )
+
+ def create_export(self, context, volume, connector):
+ pass
+diff --git a/driver-requirements.txt b/driver-requirements.txt
+index 0240e7e78..5b992df2e 100644
+--- a/driver-requirements.txt
++++ b/driver-requirements.txt
+@@ -40,7 +40,7 @@ infi.dtypes.wwn # PSF
+ infi.dtypes.iqn # PSF
+
+ # Storpool
+-storpool>=4.0.0 # Apache-2.0
++storpool>=7.1.0 # Apache-2.0
+ storpool.spopenstack>=2.2.1 # Apache-2.0
+
+ # Datera
+diff --git a/releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml b/releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml
+new file mode 100644
+index 000000000..180427d9e
+--- /dev/null
++++ b/releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml
+@@ -0,0 +1,6 @@
++---
++features:
++ - |
++    StorPool driver: improved the way volumes are cloned into different
++ StorPool templates (exposed as Cinder storage pools) if requested,
++ eliminating some data duplication in the underlying StorPool cluster.
+--
+2.35.1
+
diff --git a/patches/openstack/cinder/sep-sp-leave-it-to-brick.patch b/patches/openstack/cinder/sep-sp-leave-it-to-brick.patch
new file mode 100644
index 0000000..fea0d2a
--- /dev/null
+++ b/patches/openstack/cinder/sep-sp-leave-it-to-brick.patch
@@ -0,0 +1,113 @@
+From 6c24a83a1b7c1c31a43088165492604f440e00c5 Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Tue, 20 Apr 2021 17:46:41 +0300
+Subject: [PATCH 4/7] StorPool: drop _attach_volume() and _detach_volume()
+
+Our os-brick connector already handles the "keep track of which volume
+is attached for what reason" calls to storpool.spopenstack. However,
+we need to explicitly specify the access mode in initialize_connection()
+now, since our os-brick connector does not know how to divine it yet.
+(this will come later with the "attach snapshot" functionality)
+
+Incidentally, this fixes the "create an encrypted volume" flow, since
+our _attach_volume() implementation never actually returned a connector
+in the attachment info...
+
+Change-Id: I5da7ae04b87b4fd52a682a6545060e852174f6c8
+Closes-Bug: #1939241
+---
+ .../unit/volume/drivers/test_storpool.py | 4 +-
+ cinder/volume/drivers/storpool.py | 40 +------------------
+ ...ch-encrypted-volumes-783c723683b8f9a9.yaml | 7 ++++
+ 3 files changed, 11 insertions(+), 40 deletions(-)
+ create mode 100644 releasenotes/notes/bug-1939241-storpool-attach-encrypted-volumes-783c723683b8f9a9.yaml
+
+diff --git a/cinder/tests/unit/volume/drivers/test_storpool.py b/cinder/tests/unit/volume/drivers/test_storpool.py
+index a5f763b96..2e6f56526 100644
+--- a/cinder/tests/unit/volume/drivers/test_storpool.py
++++ b/cinder/tests/unit/volume/drivers/test_storpool.py
+@@ -225,7 +225,9 @@ class StorPoolTestCase(test.TestCase):
+ def test_initialize_connection_good(self, cid, hid, name):
+ c = self.driver.initialize_connection({'id': hid}, {'host': name})
+ self.assertEqual('storpool', c['driver_volume_type'])
+- self.assertDictEqual({'client_id': cid, 'volume': hid}, c['data'])
++ self.assertDictEqual({'client_id': cid, 'volume': hid,
++ 'access_mode': 'rw'},
++ c['data'])
+
+ def test_noop_functions(self):
+ self.driver.terminate_connection(None, None)
+diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
+index 328f76c00..353329085 100644
+--- a/cinder/volume/drivers/storpool.py
++++ b/cinder/volume/drivers/storpool.py
+@@ -168,6 +168,7 @@ class StorPoolDriver(driver.VolumeDriver):
+ 'data': {
+ 'client_id': self._storpool_client_id(connector),
+ 'volume': volume['id'],
++ 'access_mode': 'rw',
+ }}
+
+ def terminate_connection(self, volume, connector, **kwargs):
+@@ -314,45 +315,6 @@ class StorPoolDriver(driver.VolumeDriver):
+ 'pools': pools
+ }
+
+- def _attach_volume(self, context, volume, properties, remote=False):
+- if remote:
+- return super(StorPoolDriver, self)._attach_volume(
+- context, volume, properties, remote=remote)
+- req_id = context.request_id
+- req = self._attach.get().get(req_id, None)
+- if req is None:
+- req = {
+- 'volume': self._attach.volumeName(volume['id']),
+- 'type': 'cinder-attach',
+- 'id': context.request_id,
+- 'rights': 2,
+- 'volsnap': False,
+- 'remove_on_detach': True
+- }
+- self._attach.add(req_id, req)
+- name = req['volume']
+- self._attach.sync(req_id, None)
+- return {'device': {'path': '/dev/storpool/' + name,
+- 'storpool_attach_req': req_id}}, volume
+-
+- def _detach_volume(self, context, attach_info, volume, properties,
+- force=False, remote=False, ignore_errors=False):
+- if remote:
+- return super(StorPoolDriver, self)._detach_volume(
+- context, attach_info, volume, properties,
+- force=force, remote=remote, ignore_errors=ignore_errors)
+- try:
+- req_id = attach_info.get('device', {}).get(
+- 'storpool_attach_req', context.request_id)
+- req = self._attach.get()[req_id]
+- name = req['volume']
+- self._attach.sync(req_id, name)
+- if req.get('remove_on_detach', False):
+- self._attach.remove(req_id)
+- except BaseException:
+- if not ignore_errors:
+- raise
+-
+ def backup_volume(self, context, backup, backup_service):
+ volume = self.db.volume_get(context, backup['volume_id'])
+ req_id = context.request_id
+diff --git a/releasenotes/notes/bug-1939241-storpool-attach-encrypted-volumes-783c723683b8f9a9.yaml b/releasenotes/notes/bug-1939241-storpool-attach-encrypted-volumes-783c723683b8f9a9.yaml
+new file mode 100644
+index 000000000..b19904041
+--- /dev/null
++++ b/releasenotes/notes/bug-1939241-storpool-attach-encrypted-volumes-783c723683b8f9a9.yaml
+@@ -0,0 +1,7 @@
++---
++fixes:
++ - |
++ StorPool driver `bug #1939241
++ <https://bugs.launchpad.net/cinder/+bug/1939241>`_: Fixed the creation of
++ encrypted StorPool volumes by dropping the needlessly and incompletely
++ overridden `_attach_volume()` and `_detach_volume()` methods.
+--
+2.35.1
+
diff --git a/patches/openstack/cinder/sep-sp-rm-backup.patch b/patches/openstack/cinder/sep-sp-rm-backup.patch
new file mode 100644
index 0000000..76efaf2
--- /dev/null
+++ b/patches/openstack/cinder/sep-sp-rm-backup.patch
@@ -0,0 +1,55 @@
+From ade8da3e2df38eaf08717009cc1e874af01aca9d Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Mon, 11 May 2020 11:02:53 +0300
+Subject: [PATCH 5/7] StorPool driver: remove the obsolete backup_volume()
+
+Follow suit with I984de3df803f12dbb95e3309e668b3fbd519e70f.
+
+Change-Id: Ia172452fd7c96dccfe54789d868fcf7b748322a3
+---
+ cinder/volume/drivers/storpool.py | 29 -----------------------------
+ 1 file changed, 29 deletions(-)
+
+diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
+index 353329085..cccda8ecb 100644
+--- a/cinder/volume/drivers/storpool.py
++++ b/cinder/volume/drivers/storpool.py
+@@ -315,35 +315,6 @@ class StorPoolDriver(driver.VolumeDriver):
+ 'pools': pools
+ }
+
+- def backup_volume(self, context, backup, backup_service):
+- volume = self.db.volume_get(context, backup['volume_id'])
+- req_id = context.request_id
+- volname = self._attach.volumeName(volume['id'])
+- name = self._attach.volsnapName(volume['id'], req_id)
+- try:
+- self._attach.api().snapshotCreate(volname, {'name': name})
+- except spapi.ApiError as e:
+- raise self._backendException(e)
+- self._attach.add(req_id, {
+- 'volume': name,
+- 'type': 'backup',
+- 'id': req_id,
+- 'rights': 1,
+- 'volsnap': True
+- })
+- try:
+- return super(StorPoolDriver, self).backup_volume(
+- context, backup, backup_service)
+- finally:
+- self._attach.remove(req_id)
+- try:
+- self._attach.api().snapshotDelete(name)
+- except spapi.ApiError as e:
+- LOG.error(
+- 'Could not remove the temp snapshot %(name)s for '
+- '%(vol)s: %(err)s',
+- {'name': name, 'vol': volname, 'err': e})
+-
+ def copy_volume_to_image(self, context, volume, image_service, image_meta):
+ req_id = context.request_id
+ volname = self._attach.volumeName(volume['id'])
+--
+2.35.1
+
diff --git a/patches/openstack/cinder/sep-sp-rm-copy-volimg.patch b/patches/openstack/cinder/sep-sp-rm-copy-volimg.patch
new file mode 100644
index 0000000..da8be40
--- /dev/null
+++ b/patches/openstack/cinder/sep-sp-rm-copy-volimg.patch
@@ -0,0 +1,71 @@
+From d93a652c373a95e8ebcadd6ee002f725deebbdbd Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Mon, 26 Sep 2022 16:04:36 +0300
+Subject: [PATCH 6/7] StorPool: drop copy_image_to_volume() and
+ copy_volume_to_image()
+
+These methods seem to be leftovers from a bygone era when the parent
+driver could not or would not attach volumes safely.
+
+Change-Id: I6e1a9026e677aee6c7ccad908fe6f92dc253762a
+---
+ cinder/volume/drivers/storpool.py | 43 -------------------------------
+ 1 file changed, 43 deletions(-)
+
+diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
+index cccda8ecb..cdaf0043d 100644
+--- a/cinder/volume/drivers/storpool.py
++++ b/cinder/volume/drivers/storpool.py
+@@ -315,49 +315,6 @@ class StorPoolDriver(driver.VolumeDriver):
+ 'pools': pools
+ }
+
+- def copy_volume_to_image(self, context, volume, image_service, image_meta):
+- req_id = context.request_id
+- volname = self._attach.volumeName(volume['id'])
+- name = self._attach.volsnapName(volume['id'], req_id)
+- try:
+- self._attach.api().snapshotCreate(volname, {'name': name})
+- except spapi.ApiError as e:
+- raise self._backendException(e)
+- self._attach.add(req_id, {
+- 'volume': name,
+- 'type': 'copy-from',
+- 'id': req_id,
+- 'rights': 1,
+- 'volsnap': True
+- })
+- try:
+- return super(StorPoolDriver, self).copy_volume_to_image(
+- context, volume, image_service, image_meta)
+- finally:
+- self._attach.remove(req_id)
+- try:
+- self._attach.api().snapshotDelete(name)
+- except spapi.ApiError as e:
+- LOG.error(
+- 'Could not remove the temp snapshot %(name)s for '
+- '%(vol)s: %(err)s',
+- {'name': name, 'vol': volname, 'err': e})
+-
+- def copy_image_to_volume(self, context, volume, image_service, image_id):
+- req_id = context.request_id
+- name = self._attach.volumeName(volume['id'])
+- self._attach.add(req_id, {
+- 'volume': name,
+- 'type': 'copy-to',
+- 'id': req_id,
+- 'rights': 2
+- })
+- try:
+- return super(StorPoolDriver, self).copy_image_to_volume(
+- context, volume, image_service, image_id)
+- finally:
+- self._attach.remove(req_id)
+-
+ def extend_volume(self, volume, new_size):
+ size = int(new_size) * units.Gi
+ name = self._attach.volumeName(volume['id'])
+--
+2.35.1
+
diff --git a/patches/series.experimental b/patches/series.experimental
index e15d6de..62f87f2 100644
--- a/patches/series.experimental
+++ b/patches/series.experimental
@@ -1,10 +1,9 @@
-openstack/cinder/glance-upload-uri.patch
-openstack/cinder/clone-across-pools.patch
-openstack/cinder/leave-it-to-brick.patch
-openstack/cinder/storpool-do-detach.patch
-openstack/cinder/storpool-rm-backup.patch
-openstack/cinder/storpool-clone-volume.patch
-openstack/cinder/storpool-clone-across.patch
-openstack/cinder/storpool-iscsi.patch
+openstack/cinder/sep-glance-upload-uri.patch
+openstack/cinder/sep-clone-across-pools.patch
+openstack/cinder/sep-sp-clone-across-pools.patch
+openstack/cinder/sep-sp-leave-it-to-brick.patch
+openstack/cinder/sep-sp-rm-backup.patch
+openstack/cinder/sep-sp-rm-copy-volimg.patch
+openstack/cinder/sep-sp-clone-volume.patch
openstack/cinderlib/storpool-test-20190910.patch
openstack/devstack/eatmydata.patch