exp: apply the clone-across-pools Cinder patch (part of the series submitted upstream for review).

Change-Id: I82b077496af46e728080afa3bf18390898f72aac
diff --git a/patches/openstack/cinder/clone-across-pools.patch b/patches/openstack/cinder/clone-across-pools.patch
new file mode 100644
index 0000000..3279d6d
--- /dev/null
+++ b/patches/openstack/cinder/clone-across-pools.patch
@@ -0,0 +1,370 @@
+From 8fafd470b34b4b94d0e6e65f1a6049232f06b9f8 Mon Sep 17 00:00:00 2001
+From: Peter Penchev <openstack-dev@storpool.com>
+Date: Wed, 22 Jun 2022 10:04:31 +0300
+Subject: [PATCH 2/8] Add the clone_across_pools driver capability
+
+Let drivers declare that they can clone a volume into a different pool and
+relax the checks when creating a volume from an image (both with and
+without the image cache).
+
+TODO: document this capability a bit more.
+
+Change-Id: Idbac4dae00f9fa03fb14547a204666721bf67d96
+Implements: blueprint clone-across-pools
+---
+ cinder/image/cache.py                         | 11 ++-
+ cinder/tests/unit/image/test_cache.py         | 14 ++--
+ .../volume/flows/test_create_volume_flow.py   | 73 +++++++++++++++++++
+ cinder/volume/flows/manager/create_volume.py  |  8 +-
+ cinder/volume/manager.py                      |  3 +-
+ doc/source/reference/support-matrix.ini       | 73 +++++++++++++++++++
+ 6 files changed, 169 insertions(+), 13 deletions(-)
+
+diff --git a/cinder/image/cache.py b/cinder/image/cache.py
+index 6d30210a4..a79451bf2 100644
+--- a/cinder/image/cache.py
++++ b/cinder/image/cache.py
+@@ -34,11 +34,13 @@ class ImageVolumeCache(object):
+                  db,
+                  volume_api,
+                  max_cache_size_gb: int = 0,
+-                 max_cache_size_count: int = 0):
++                 max_cache_size_count: int = 0,
++                 clone_across_pools: bool = False):
+         self.db = db
+         self.volume_api = volume_api
+         self.max_cache_size_gb = int(max_cache_size_gb)
+         self.max_cache_size_count = int(max_cache_size_count)
++        self.clone_across_pools = bool(clone_across_pools)
+         self.notifier = rpc.get_notifier('volume', CONF.host)
+ 
+     def get_by_image_volume(self,
+@@ -55,11 +57,12 @@ class ImageVolumeCache(object):
+         self._notify_cache_eviction(context, cache_entry['image_id'],
+                                     cache_entry['host'])
+ 
+-    @staticmethod
+-    def _get_query_filters(volume_ref: objects.Volume) -> dict:
++    def _get_query_filters(self, volume_ref: objects.Volume) -> dict:
+         if volume_ref.is_clustered:
+             return {'cluster_name': volume_ref.cluster_name}
+-        return {'host': volume_ref.host}
++        if not self.clone_across_pools:
++            return {'host': volume_ref.host}
++        return {}
+ 
+     def get_entry(self,
+                   context: context.RequestContext,
+diff --git a/cinder/tests/unit/image/test_cache.py b/cinder/tests/unit/image/test_cache.py
+index c3aba9b28..b8b704e0d 100644
+--- a/cinder/tests/unit/image/test_cache.py
++++ b/cinder/tests/unit/image/test_cache.py
+@@ -42,11 +42,12 @@ class ImageVolumeCacheTestCase(test.TestCase):
+         self.volume.update(vol_params)
+         self.volume_ovo = objects.Volume(self.context, **vol_params)
+ 
+-    def _build_cache(self, max_gb=0, max_count=0):
++    def _build_cache(self, max_gb=0, max_count=0, clone_across_pools=False):
+         cache = image_cache.ImageVolumeCache(self.mock_db,
+                                              self.mock_volume_api,
+                                              max_gb,
+-                                             max_count)
++                                             max_count,
++                                             clone_across_pools)
+         cache.notifier = self.notifier
+         return cache
+ 
+@@ -91,9 +92,10 @@ class ImageVolumeCacheTestCase(test.TestCase):
+         self.assertEqual(entry['image_id'], msg['payload']['image_id'])
+         self.assertEqual(1, len(self.notifier.notifications))
+ 
+-    @ddt.data(True, False)
+-    def test_get_entry(self, clustered):
+-        cache = self._build_cache()
++    @ddt.data((True, True), (True, False), (False, True), (False, False))
++    @ddt.unpack
++    def test_get_entry(self, clustered, clone_across_pools):
++        cache = self._build_cache(clone_across_pools=clone_across_pools)
+         entry = self._build_entry()
+         image_meta = {
+             'is_public': True,
+@@ -107,7 +109,7 @@ class ImageVolumeCacheTestCase(test.TestCase):
+          image_volume_cache_get_and_update_last_used.return_value) = entry
+         if not clustered:
+             self.volume_ovo.cluster_name = None
+-            expect = {'host': self.volume.host}
++            expect = {} if clone_across_pools else {'host': self.volume.host}
+         else:
+             expect = {'cluster_name': self.volume.cluster_name}
+         found_entry = cache.get_entry(self.context,
+diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+index 2a7b625bc..b7d43008f 100644
+--- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py
++++ b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+@@ -1058,6 +1058,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+             self, volume_get_by_id, vol_update, rekey_vol, cleanup_cg):
+         fake_db = mock.MagicMock()
+         fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {}
+         fake_volume_manager = mock.MagicMock()
+         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+             fake_volume_manager, fake_db, fake_driver)
+@@ -1083,6 +1084,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+                                   handle_bootable, cleanup_cg):
+         fake_db = mock.MagicMock()
+         fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {}
+         fake_volume_manager = mock.MagicMock()
+         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+             fake_volume_manager, fake_db, fake_driver)
+@@ -1108,6 +1110,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+                                                  mock_cleanup_cg):
+         fake_db = mock.MagicMock()
+         fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {}
+         fake_volume_manager = mock.MagicMock()
+         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+             fake_volume_manager, fake_db, fake_driver)
+@@ -1144,6 +1147,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+                                                 mock_cleanup_cg):
+         fake_db = mock.MagicMock()
+         fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {}
+         fake_volume_manager = mock.MagicMock()
+         fake_cache = mock.MagicMock()
+         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+@@ -1192,6 +1196,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+                                                     mock_cleanup_cg):
+         fake_db = mock.MagicMock()
+         fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {}
+         fake_volume_manager = mock.MagicMock()
+         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+             fake_volume_manager, fake_db, fake_driver)
+@@ -1250,6 +1255,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+                                 driver_error):
+         fake_db = mock.MagicMock()
+         fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {}
+         fake_volume_manager = mock.MagicMock()
+         backup_host = 'host@backend#pool'
+         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+@@ -1289,6 +1295,7 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
+     def test_create_drive_error(self, mock_message_create):
+         fake_db = mock.MagicMock()
+         fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {}
+         fake_volume_manager = mock.MagicMock()
+         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+             fake_volume_manager, fake_db, fake_driver)
+@@ -1490,6 +1497,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+             spec=utils.get_file_spec())
+         fake_db = mock.MagicMock()
+         fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {}
+         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+             mock.MagicMock(), fake_db, fake_driver)
+         fake_image_service = fake_image.FakeImageService()
+@@ -1516,6 +1524,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+                       'cinder_encryption_key_id': None}
+ 
+         fake_driver.clone_image.return_value = (None, False)
++        fake_db.volume_get_all.return_value = []
+         fake_db.volume_get_all_by_host.return_value = [image_volume]
+ 
+         fake_manager._create_from_image(self.ctxt,
+@@ -1534,6 +1543,69 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+             self.assertFalse(fake_driver.create_cloned_volume.called)
+         mock_cleanup_cg.assert_called_once_with(volume)
+ 
++    @mock.patch('cinder.volume.flows.manager.create_volume.'
++                'CreateVolumeFromSpecTask.'
++                '_cleanup_cg_in_volume')
++    @mock.patch('cinder.image.image_utils.TemporaryImages.fetch')
++    @mock.patch('cinder.volume.flows.manager.create_volume.'
++                'CreateVolumeFromSpecTask.'
++                '_handle_bootable_volume_glance_meta')
++    @mock.patch('cinder.image.image_utils.qemu_img_info')
++    def test_create_from_image_across(self, mock_qemu_info, handle_bootable,
++                                      mock_fetch_img, mock_cleanup_cg,
++                                      format='raw', owner=None,
++                                      location=True):
++        self.flags(allowed_direct_url_schemes=['cinder'])
++        mock_fetch_img.return_value = mock.MagicMock(
++            spec=utils.get_file_spec())
++        fake_db = mock.MagicMock()
++        fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {'clone_across_pools': True}
++        fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
++            mock.MagicMock(), fake_db, fake_driver)
++        fake_image_service = fake_image.FakeImageService()
++
++        volume = fake_volume.fake_volume_obj(self.ctxt,
++                                             host='host@backend#pool')
++        image_volume = fake_volume.fake_volume_obj(self.ctxt,
++                                                   volume_metadata={})
++        image_id = fakes.IMAGE_ID
++        image_info = imageutils.QemuImgInfo()
++        image_info.virtual_size = '1073741824'
++        mock_qemu_info.return_value = image_info
++
++        url = 'cinder://%s' % image_volume['id']
++        image_location = None
++        if location:
++            image_location = (url, [{'url': url, 'metadata': {}}])
++        image_meta = {'id': image_id,
++                      'container_format': 'bare',
++                      'disk_format': format,
++                      'size': 1024,
++                      'owner': owner or self.ctxt.project_id,
++                      'virtual_size': None,
++                      'cinder_encryption_key_id': None}
++
++        fake_driver.clone_image.return_value = (None, False)
++        fake_db.volume_get_all.return_value = [image_volume]
++        fake_db.volume_get_all_by_host.return_value = []
++
++        fake_manager._create_from_image(self.ctxt,
++                                        volume,
++                                        image_location,
++                                        image_id,
++                                        image_meta,
++                                        fake_image_service)
++        if format == 'raw' and not owner and location:
++            fake_driver.create_cloned_volume.assert_called_once_with(
++                volume, image_volume)
++            handle_bootable.assert_called_once_with(self.ctxt, volume,
++                                                    image_id=image_id,
++                                                    image_meta=image_meta)
++        else:
++            self.assertFalse(fake_driver.create_cloned_volume.called)
++        mock_cleanup_cg.assert_called_once_with(volume)
++
+     LEGACY_URI = 'cinder://%s' % fakes.VOLUME_ID
+     MULTISTORE_URI = 'cinder://fake-store/%s' % fakes.VOLUME_ID
+ 
+@@ -1560,6 +1632,7 @@ class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase):
+             spec=utils.get_file_spec())
+         fake_db = mock.MagicMock()
+         fake_driver = mock.MagicMock()
++        fake_driver.capabilities = {}
+         fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
+             mock.MagicMock(), fake_db, fake_driver)
+         fake_image_service = fake_image.FakeImageService()
+diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
+index 4c18d7d84..187f3b3ed 100644
+--- a/cinder/volume/flows/manager/create_volume.py
++++ b/cinder/volume/flows/manager/create_volume.py
+@@ -732,8 +732,12 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
+         urls = list(set([direct_url]
+                         + [loc.get('url') for loc in locations or []]))
+         image_volume_ids = self._extract_cinder_ids(urls)
+-        image_volumes = self.db.volume_get_all_by_host(
+-            context, volume['host'], filters={'id': image_volume_ids})
++        if self.driver.capabilities.get('clone_across_pools'):
++            image_volumes = self.db.volume_get_all(
++                context, filters={'id': image_volume_ids})
++        else:
++            image_volumes = self.db.volume_get_all_by_host(
++                context, volume['host'], filters={'id': image_volume_ids})
+ 
+         for image_volume in image_volumes:
+             # For the case image volume is stored in the service tenant,
+diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
+index 804a1c6cb..ecf264d10 100644
+--- a/cinder/volume/manager.py
++++ b/cinder/volume/manager.py
+@@ -356,7 +356,8 @@ class VolumeManager(manager.CleanableManager,
+                 self.db,
+                 cinder_volume.API(),
+                 max_cache_size,
+-                max_cache_entries
++                max_cache_entries,
++                self.driver.capabilities.get('clone_across_pools', False)
+             )
+             LOG.info('Image-volume cache enabled for host %(host)s.',
+                      {'host': self.host})
+diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
+index a379139b9..38a279266 100644
+--- a/doc/source/reference/support-matrix.ini
++++ b/doc/source/reference/support-matrix.ini
+@@ -957,3 +957,76 @@ driver.vmware=missing
+ driver.win_iscsi=missing
+ driver.win_smb=missing
+ driver.zadara=missing
++
++[operation.clone_across_pools]
++title=Clone a volume into a different pool
++status=optional
++notes=Vendor drivers that support cloning a volume into a different
++      storage pool, e.g. when creating a volume from a Cinder-backed
++      Glance image.
++driver.datera=missing
++driver.dell_emc_powermax=missing
++driver.dell_emc_powerstore=missing
++driver.dell_emc_powervault=missing
++driver.dell_emc_sc=missing
++driver.dell_emc_unity=missing
++driver.dell_emc_vmax_af=missing
++driver.dell_emc_vmax_3=missing
++driver.dell_emc_vnx=missing
++driver.dell_emc_powerflex=missing
++driver.dell_emc_xtremio=missing
++driver.fujitsu_eternus=missing
++driver.hitachi_vsp=missing
++driver.hpe_3par=missing
++driver.hpe_msa=missing
++driver.hpe_nimble=missing
++driver.huawei_t_v1=missing
++driver.huawei_t_v2=missing
++driver.huawei_v3=missing
++driver.huawei_f_v3=missing
++driver.huawei_f_v5=missing
++driver.huawei_v5=missing
++driver.huawei_18000=missing
++driver.huawei_dorado=missing
++driver.huawei_fusionstorage=missing
++driver.infinidat=missing
++driver.ibm_ds8k=missing
++driver.ibm_flashsystem=missing
++driver.ibm_gpfs=missing
++driver.ibm_storwize=missing
++driver.ibm_xiv=missing
++driver.infortrend=missing
++driver.inspur=missing
++driver.inspur_as13000=missing
++driver.kaminario=missing
++driver.kioxia_kumoscale=missing
++driver.lenovo=missing
++driver.lightbits_lightos=missing
++driver.linbit_linstor=missing
++driver.lvm=missing
++driver.macrosan=missing
++driver.nec=missing
++driver.nec_v=missing
++driver.netapp_ontap=missing
++driver.netapp_solidfire=missing
++driver.nexenta=missing
++driver.nfs=missing
++driver.opene_joviandss=missing
++driver.prophetstor=missing
++driver.pure=missing
++driver.qnap=missing
++driver.quobyte=missing
++driver.rbd=missing
++driver.rbd_iscsi=missing
++driver.sandstone=missing
++driver.seagate=missing
++driver.storpool=missing
++driver.synology=missing
++driver.toyou_netstor=missing
++driver.vrtsaccess=missing
++driver.vrtscnfs=missing
++driver.vzstorage=missing
++driver.vmware=missing
++driver.win_iscsi=missing
++driver.win_smb=missing
++driver.zadara=missing
+-- 
+2.35.1
+