exp: Temporarily regenerate the iSCSI patch
Change-Id: I45a32c0d8b914140e554232628d0c96c4ff8a1e9
diff --git a/patches/openstack/cinder/sep-sp-iscsi.patch b/patches/openstack/cinder/sep-sp-iscsi.patch
index e516e14..819055a 100644
--- a/patches/openstack/cinder/sep-sp-iscsi.patch
+++ b/patches/openstack/cinder/sep-sp-iscsi.patch
@@ -1,6 +1,6 @@
-From 6ae905667806b39fccb7079ba2f61ba850a9fde3 Mon Sep 17 00:00:00 2001
+From 06914652d6de93e3d14209ff6a78b930826f0823 Mon Sep 17 00:00:00 2001
From: Peter Penchev <openstack-dev@storpool.com>
-Date: Tue, 5 Nov 2024 11:28:47 +0200
+Date: Mon, 12 Mar 2018 12:00:10 +0200
Subject: [PATCH 9/9] Add iSCSI export support to the StorPool driver
Add four new driver options:
@@ -25,38 +25,53 @@
Change-Id: I9de64306e0e6976268df782053b0651dd1cca96f
---
- .../unit/volume/drivers/test_storpool.py | 438 +++++++++++++++++-
- cinder/volume/drivers/storpool.py | 408 +++++++++++++++-
- .../drivers/storpool-volume-driver.rst | 64 ++-
- 3 files changed, 899 insertions(+), 11 deletions(-)
+ .../unit/volume/drivers/test_storpool.py | 437 +++++++++++++++++-
+ cinder/volume/drivers/storpool.py | 374 ++++++++++++++-
+ .../drivers/storpool-volume-driver.rst | 60 ++-
+ .../storpool-iscsi-cefcfe590a07c5c7.yaml | 13 +
+ 4 files changed, 874 insertions(+), 10 deletions(-)
+ create mode 100644 releasenotes/notes/storpool-iscsi-cefcfe590a07c5c7.yaml
diff --git a/cinder/tests/unit/volume/drivers/test_storpool.py b/cinder/tests/unit/volume/drivers/test_storpool.py
-index 671bc8def..2afc72820 100644
+index a6c894ab9..78c9f14a8 100644
--- a/cinder/tests/unit/volume/drivers/test_storpool.py
+++ b/cinder/tests/unit/volume/drivers/test_storpool.py
-@@ -15,15 +15,19 @@
+@@ -14,14 +14,24 @@
+ # under the License.
- import copy
++from __future__ import annotations
++
+import dataclasses
import itertools
- import os
import re
+ import sys
+from typing import Any, NamedTuple, TYPE_CHECKING # noqa: H301
from unittest import mock
import ddt
from oslo_utils import units
++if TYPE_CHECKING:
++ if sys.version_info >= (3, 11):
++ from typing import Self
++ else:
++ from typing_extensions import Self
++
+
+ fakeStorPool = mock.Mock()
+ fakeStorPool.spopenstack = mock.Mock()
+@@ -31,12 +41,21 @@ fakeStorPool.sptypes = mock.Mock()
+ sys.modules['storpool'] = fakeStorPool
+
+
+from cinder.common import constants
from cinder import exception
+from cinder.tests.unit import fake_constants as fconst
from cinder.tests.unit import test
from cinder.volume import configuration as conf
from cinder.volume.drivers import storpool as driver
-@@ -55,6 +59,12 @@ SP_CONF = {
- 'SP_OURID': '1'
- }
+
+_ISCSI_IQN_OURS = 'beleriand'
+_ISCSI_IQN_OTHER = 'rohan'
@@ -64,24 +79,64 @@
+_ISCSI_PAT_OTHER = 'roh*'
+_ISCSI_PAT_BOTH = '*riand roh*'
+_ISCSI_PORTAL_GROUP = 'openstack_pg'
-
++
volume_types = {
1: {},
-@@ -93,6 +103,10 @@ def snapshotName(vtype, vid, more=None):
- )
+ 2: {'storpool_template': 'ssd'},
+@@ -73,6 +92,10 @@ def snapshotName(vtype, vid):
+ return 'os--snap--{t}--{id}'.format(t=vtype, id=vid)
+def targetName(vid):
+ return 'iqn.2012-11.storpool:{id}'.format(id=vid)
+
+
- class MockAPI(object):
- def __init__(self, *args):
- self._disks = {}
-@@ -203,6 +217,241 @@ class MockVolumeDB(object):
- 'volume_type': self.vol_types.get(vid),
- }
+ class MockDisk(object):
+ def __init__(self, diskId):
+ self.id = diskId
+@@ -197,6 +220,273 @@ def MockVolumeUpdateDesc(size):
+ return {'size': size}
+
++@dataclasses.dataclass(frozen=True)
++class MockIscsiNetwork:
++ """Mock a StorPool IP CIDR network definition (partially)."""
++
++ address: str
++
++
++@dataclasses.dataclass(frozen=True)
++class MockIscsiPortalGroup:
++ """Mock a StorPool iSCSI portal group definition (partially)."""
++
++ name: str
++ networks: list[MockIscsiNetwork]
++
++
++@dataclasses.dataclass(frozen=True)
++class MockIscsiExport:
++ """Mock a StorPool iSCSI exported volume/target definition."""
++
++ portalGroup: str
++ target: str
++
++
++@dataclasses.dataclass(frozen=True)
++class MockIscsiInitiator:
++ """Mock a StorPool iSCSI initiator definition."""
++
++ name: str
++ exports: list[MockIscsiExport]
++
++
++@dataclasses.dataclass(frozen=True)
++class MockIscsiTarget:
++ """Mock a StorPool iSCSI volume-to-target mapping definition."""
++
++ name: str
++ volume: str
++
++
+class IscsiTestCase(NamedTuple):
+ """A single test case for the iSCSI config and export methods."""
+
@@ -95,81 +150,92 @@
+class MockIscsiConfig:
+ """Mock the structure returned by the "get current config" query."""
+
++ portalGroups: dict[str, MockIscsiPortalGroup]
++ initiators: dict[str, MockIscsiInitiator]
++ targets: dict[str, MockIscsiTarget]
++
+ @classmethod
-+ def build(cls, tcase: IscsiTestCase) -> dict:
++ def build(cls, tcase: IscsiTestCase) -> Self:
+ """Build a test config structure."""
+ initiators = {
-+ '0': {'name': _ISCSI_IQN_OTHER, 'exports': []},
++ '0': MockIscsiInitiator(name=_ISCSI_IQN_OTHER, exports=[]),
+ }
+ if tcase.initiator is not None:
-+ initiators['1'] = {
-+ 'name': tcase.initiator,
-+ 'exports': (
++ initiators['1'] = MockIscsiInitiator(
++ name=tcase.initiator,
++ exports=(
+ [
-+ {
-+ 'portalGroup': _ISCSI_PORTAL_GROUP,
-+ 'target': targetName(tcase.volume),
-+ },
++ MockIscsiExport(
++ portalGroup=_ISCSI_PORTAL_GROUP,
++ target=targetName(tcase.volume),
++ ),
+ ]
+ if tcase.exported
+ else []
+ ),
-+ }
++ )
+
+ targets = {
-+ '0': {
-+ 'name': targetName(fconst.VOLUME2_ID),
-+ 'volume': volumeName(fconst.VOLUME2_ID),
-+ },
++ '0': MockIscsiTarget(
++ name=targetName(fconst.VOLUME2_ID),
++ volume=volumeName(fconst.VOLUME2_ID),
++ ),
+ }
+ if tcase.volume is not None:
-+ targets['1'] = {
-+ 'name': targetName(tcase.volume),
-+ 'volume': volumeName(tcase.volume),
-+ }
++ targets['1'] = MockIscsiTarget(
++ name=targetName(tcase.volume),
++ volume=volumeName(tcase.volume),
++ )
+
-+ return {
-+
-+ 'portalGroups': {
-+ '0': {
-+ 'name': _ISCSI_PORTAL_GROUP + '-not',
-+ 'networks': [],
-+ },
-+ '1': {
-+ 'name': _ISCSI_PORTAL_GROUP,
-+ 'networks': [
-+ {'address': "192.0.2.0"},
-+ {'address': "195.51.100.0"},
++ return cls(
++ portalGroups={
++ '0': MockIscsiPortalGroup(
++ name=_ISCSI_PORTAL_GROUP + '-not',
++ networks=[],
++ ),
++ '1': MockIscsiPortalGroup(
++ name=_ISCSI_PORTAL_GROUP,
++ networks=[
++ MockIscsiNetwork(address="192.0.2.0"),
++ MockIscsiNetwork(address="195.51.100.0"),
+ ],
-+ },
++ ),
+ },
-+ 'initiators': initiators,
-+ 'targets': targets,
++ initiators=initiators,
++ targets=targets,
++ )
+
-+ }
+
++@dataclasses.dataclass(frozen=True)
++class MockIscsiConfigTop:
++ """Mock the top level of the "get the iSCSI configuration" response."""
++
++ iscsi: MockIscsiConfig
+
+
+class MockIscsiAPI:
+ """Mock only the iSCSI-related calls of the StorPool API bindings."""
+
+ _asrt: test.TestCase
-+ _configs: list[dict]
++ _configs: list[MockIscsiConfig]
+
+ def __init__(
+ self,
-+ configs: list[dict],
++ configs: list[MockIscsiConfig],
+ asrt: test.TestCase,
+ ) -> None:
+ """Store the reference to the list of iSCSI config objects."""
+ self._asrt = asrt
+ self._configs = configs
+
-+ def get_iscsi_config(self) -> dict:
++ def iSCSIConfig(self) -> MockIscsiConfigTop:
+ """Return the last version of the iSCSI configuration."""
-+ return {'iscsi': self._configs[-1]}
++ return MockIscsiConfigTop(iscsi=self._configs[-1])
+
-+ def _handle_export(self, cfg: dict, cmd: dict[str, Any]) -> dict:
++ def _handle_export(
++ self,
++ cfg: MockIscsiConfig, cmd: dict[str, Any],
++ ) -> MockIscsiConfig:
+ """Add an export for an initiator."""
+ self._asrt.assertDictEqual(
+ cmd,
@@ -179,43 +245,30 @@
+ 'volumeName': volumeName(fconst.VOLUME_ID),
+ },
+ )
-+ self._asrt.assertEqual(cfg['initiators']['1']['name'], cmd['initiator'])
-+ self._asrt.assertListEqual(cfg['initiators']['1']['exports'], [])
++ self._asrt.assertEqual(cfg.initiators['1'].name, cmd['initiator'])
++ self._asrt.assertListEqual(cfg.initiators['1'].exports, [])
+
-+ cfg['initiators'] = {
-+ **cfg['initiators'],
-+ '1': {
-+ **cfg['initiators']['1'],
-+ 'exports': [
-+ {
-+ 'portalGroup': cmd['portalGroup'],
-+ 'target': targetName(fconst.VOLUME_ID),
-+ },
-+ ],
-+ },
-+ }
-+ return cfg
-+
-+ def _handle_delete_export(self, cfg: dict, cmd: dict[str, Any]) -> dict:
-+ """Delete an export for an initiator."""
-+ self._asrt.assertDictEqual(
-+ cmd,
-+ {
-+ 'initiator': _ISCSI_IQN_OURS,
-+ 'portalGroup': _ISCSI_PORTAL_GROUP,
-+ 'volumeName': volumeName(fconst.VOLUME_ID),
++ return dataclasses.replace(
++ cfg,
++ initiators={
++ **cfg.initiators,
++ '1': dataclasses.replace(
++ cfg.initiators['1'],
++ exports=[
++ MockIscsiExport(
++ portalGroup=cmd['portalGroup'],
++ target=targetName(fconst.VOLUME_ID),
++ ),
++ ],
++ ),
+ },
+ )
-+ self._asrt.assertEqual(cfg['initiators']['1']['name'], cmd['initiator'])
-+ self._asrt.assertListEqual(
-+ cfg['initiators']['1']['exports'],
-+ [{'portalGroup': _ISCSI_PORTAL_GROUP,
-+ 'target': cfg['targets']['1']['name']}])
+
-+ del cfg['initiators']['1']
-+ return cfg
-+
-+ def _handle_create_initiator(self, cfg: dict, cmd: dict[str, Any]) -> dict:
++ def _handle_create_initiator(
++ self,
++ cfg: MockIscsiConfig,
++ cmd: dict[str, Any],
++ ) -> MockIscsiConfig:
+ """Add a whole new initiator."""
+ self._asrt.assertDictEqual(
+ cmd,
@@ -227,49 +280,45 @@
+ )
+ self._asrt.assertNotIn(
+ cmd['name'],
-+ [init['name'] for init in cfg['initiators'].values()],
++ [init.name for init in cfg.initiators.values()],
+ )
-+ self._asrt.assertListEqual(sorted(cfg['initiators']), ['0'])
++ self._asrt.assertListEqual(sorted(cfg.initiators), ['0'])
+
-+ cfg['initiators'] = {
-+ **cfg['initiators'],
-+ '1': {'name': cmd['name'], 'exports': []},
-+ }
-+ return cfg
++ return dataclasses.replace(
++ cfg,
++ initiators={
++ **cfg.initiators,
++ '1': MockIscsiInitiator(name=cmd['name'], exports=[]),
++ },
++ )
+
-+
-+ def _handle_create_target(self, cfg: dict, cmd: dict[str, Any]) -> dict:
++ def _handle_create_target(
++ self,
++ cfg: MockIscsiConfig,
++ cmd: dict[str, Any],
++ ) -> MockIscsiConfig:
+ """Add a target for a volume so that it may be exported."""
+ self._asrt.assertDictEqual(
+ cmd,
+ {'volumeName': volumeName(fconst.VOLUME_ID)},
+ )
-+ self._asrt.assertListEqual(sorted(cfg['targets']), ['0'])
-+ cfg['targets'] = {
-+ **cfg['targets'],
-+ '1': {
-+ 'name': targetName(fconst.VOLUME_ID),
-+ 'volume': volumeName(fconst.VOLUME_ID),
++ self._asrt.assertListEqual(sorted(cfg.targets), ['0'])
++ return dataclasses.replace(
++ cfg,
++ targets={
++ **cfg.targets,
++ '1': MockIscsiTarget(
++ name=targetName(fconst.VOLUME_ID),
++ volume=volumeName(fconst.VOLUME_ID),
++ ),
+ },
-+ }
-+ return cfg
-+
-+ def _handle_delete_target(self, cfg: dict, cmd: dict[str, Any]) -> dict:
-+ """Remove a target for a volume."""
-+ self._asrt.assertDictEqual(
-+ cmd,
-+ {'volumeName': volumeName(fconst.VOLUME_ID)},
+ )
+
-+ self._asrt.assertListEqual(sorted(cfg['targets']), ['0', '1'])
-+ del cfg['targets']['1']
-+ return cfg
-+
+ def _handle_initiator_add_network(
+ self,
-+ cfg: dict,
++ cfg: MockIscsiConfig,
+ cmd: dict[str, Any],
-+ ) -> dict:
++ ) -> MockIscsiConfig:
+ """Add a network that an initiator is allowed to log in from."""
+ self._asrt.assertDictEqual(
+ cmd,
@@ -278,18 +327,16 @@
+ 'net': '0.0.0.0/0',
+ },
+ )
-+ return cfg
++ return dataclasses.replace(cfg)
+
+ _CMD_HANDLERS = {
+ 'createInitiator': _handle_create_initiator,
+ 'createTarget': _handle_create_target,
-+ 'deleteTarget': _handle_delete_target,
+ 'export': _handle_export,
-+ 'exportDelete': _handle_delete_export,
+ 'initiatorAddNetwork': _handle_initiator_add_network,
+ }
+
-+ def post_iscsi_config(
++ def iSCSIConfigChange(
+ self,
+ commands: dict[str, list[dict[str, dict[str, Any]]]],
+ ) -> None:
@@ -317,10 +364,11 @@
+ IscsiTestCase(_ISCSI_IQN_OURS, fconst.VOLUME_ID, True, 0),
+]
+
-
++
def MockSPConfig(section = 's01'):
res = {}
-@@ -382,7 +631,15 @@ class StorPoolTestCase(test.TestCase):
+ m = re.match('^s0*([A-Za-z0-9]+)$', section)
+@@ -239,7 +529,15 @@ class StorPoolTestCase(test.TestCase):
self.cfg.volume_backend_name = 'storpool_test'
self.cfg.storpool_template = None
self.cfg.storpool_replication = 3
@@ -336,8 +384,8 @@
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
-@@ -399,7 +656,7 @@ class StorPoolTestCase(test.TestCase):
- self.driver.check_for_setup_error()
+@@ -248,7 +546,7 @@ class StorPoolTestCase(test.TestCase):
+ self.driver.check_for_setup_error()
@ddt.data(
- (5, TypeError),
@@ -345,7 +393,7 @@
({'no-host': None}, KeyError),
({'host': 'sbad'}, driver.StorPoolConfigurationInvalid),
({'host': 's01'}, None),
-@@ -415,7 +672,7 @@ class StorPoolTestCase(test.TestCase):
+@@ -264,7 +562,7 @@ class StorPoolTestCase(test.TestCase):
conn)
@ddt.data(
@@ -354,7 +402,7 @@
({'no-host': None}, KeyError),
({'host': 'sbad'}, driver.StorPoolConfigurationInvalid),
)
-@@ -454,7 +711,7 @@ class StorPoolTestCase(test.TestCase):
+@@ -317,7 +615,7 @@ class StorPoolTestCase(test.TestCase):
self.assertEqual(21, pool['total_capacity_gb'])
self.assertEqual(5, int(pool['free_capacity_gb']))
@@ -363,10 +411,10 @@
self.assertFalse(pool['QoS_support'])
self.assertFalse(pool['thick_provisioning_support'])
self.assertTrue(pool['thin_provisioning_support'])
-@@ -857,3 +1114,178 @@ class StorPoolTestCase(test.TestCase):
- 'No such volume',
- self.driver.revert_to_snapshot, None,
- {'id': vol_id}, {'id': snap_id})
+@@ -735,6 +1033,139 @@ class StorPoolTestCase(test.TestCase):
+ 'volume_type': volume_type
+ }))
+
+
+ @ddt.data(
+ # The default values
@@ -422,16 +470,16 @@
+
+ def _validate_iscsi_config(
+ self,
-+ cfg: dict,
++ cfg: MockIscsiConfig,
+ res: dict[str, Any],
+ tcase: IscsiTestCase,
+ ) -> None:
+ """Make sure the returned structure makes sense."""
+ initiator = res['initiator']
-+ cfg_initiator = cfg['initiators'].get('1')
++ cfg_initiator = cfg.initiators.get('1')
+
-+ self.assertIs(res['cfg']['iscsi'], cfg)
-+ self.assertEqual(res['pg']['name'], _ISCSI_PORTAL_GROUP)
++ self.assertIs(res['cfg'].iscsi, cfg)
++ self.assertEqual(res['pg'].name, _ISCSI_PORTAL_GROUP)
+
+ if tcase.initiator is None:
+ self.assertIsNone(initiator)
@@ -443,7 +491,7 @@
+ self.assertIsNone(res['target'])
+ else:
+ self.assertIsNotNone(res['target'])
-+ self.assertEqual(res['target'], cfg['targets'].get('1'))
++ self.assertEqual(res['target'], cfg.targets.get('1'))
+
+ if tcase.initiator is None:
+ self.assertIsNone(cfg_initiator)
@@ -452,7 +500,7 @@
+ self.assertIsNotNone(cfg_initiator)
+ if tcase.exported:
+ self.assertIsNotNone(res['export'])
-+ self.assertEqual(res['export'], cfg_initiator['exports'][0])
++ self.assertEqual(res['export'], cfg_initiator.exports[0])
+ else:
+ self.assertIsNone(res['export'])
+
@@ -462,7 +510,7 @@
+ cfg_orig = MockIscsiConfig.build(tcase)
+ configs = [cfg_orig]
+ iapi = MockIscsiAPI(configs, self)
-+ with mock.patch.object(self.driver, '_sp_api', iapi):
++ with mock.patch.object(self.driver._attach, 'api', new=lambda: iapi):
+ res = self.driver._get_iscsi_config(
+ _ISCSI_IQN_OURS,
+ fconst.VOLUME_ID,
@@ -476,7 +524,7 @@
+ cfg_orig = MockIscsiConfig.build(tcase)
+ configs = [cfg_orig]
+ iapi = MockIscsiAPI(configs, self)
-+ with mock.patch.object(self.driver, '_sp_api', iapi):
++ with mock.patch.object(self.driver._attach, 'api', new=lambda: iapi):
+ self.driver._create_iscsi_export(
+ {
+ 'id': fconst.VOLUME_ID,
@@ -491,81 +539,31 @@
+
+ self.assertEqual(len(configs), tcase.commands_count + 1)
+ cfg_final = configs[-1]
-+ self.assertEqual(cfg_final['initiators']['1']['name'], _ISCSI_IQN_OURS)
++ self.assertEqual(cfg_final.initiators['1'].name, _ISCSI_IQN_OURS)
+ self.assertEqual(
-+ cfg_final['initiators']['1']['exports'][0]['target'],
++ cfg_final.initiators['1'].exports[0].target,
+ targetName(fconst.VOLUME_ID),
+ )
+ self.assertEqual(
-+ cfg_final['targets']['1']['volume'],
++ cfg_final.targets['1'].volume,
+ volumeName(fconst.VOLUME_ID),
+ )
-+
-+ @ddt.data(*_ISCSI_TEST_CASES)
-+ def test_remove_iscsi_export(self, tcase: IscsiTestCase):
-+ cfg_orig = MockIscsiConfig.build(tcase)
-+ configs = [cfg_orig]
-+ iapi = MockIscsiAPI(configs, self)
-+
-+ def _target_exists(cfg: dict, volume: str) -> bool:
-+ for name, target in cfg['targets'].items():
-+ if target['volume'] == volumeName(volume):
-+ return True
-+ return False
-+
-+ def _export_exists(cfg: dict, volume: str) -> bool:
-+ for name, initiator in cfg['initiators'].items():
-+ for export in initiator['exports']:
-+ if export['target'] == targetName(volume):
-+ return True
-+ return False
-+
-+ if tcase.exported:
-+ self.assertTrue(
-+ _target_exists(iapi.get_iscsi_config()['iscsi'], tcase.volume))
-+ self.assertTrue(
-+ _export_exists(iapi.get_iscsi_config()['iscsi'], tcase.volume))
-+
-+ with mock.patch.object(self.driver, '_sp_api', iapi):
-+ self.driver._remove_iscsi_export(
-+ {
-+ 'id': fconst.VOLUME_ID,
-+ 'display_name': fconst.VOLUME_NAME,
-+ },
-+ {
-+ 'host': _ISCSI_IQN_OURS + '.hostname',
-+ 'initiator': _ISCSI_IQN_OURS,
-+ },
-+ )
-+
-+ self.assertFalse(
-+ _target_exists(iapi.get_iscsi_config()['iscsi'], tcase.volume))
-+ self.assertFalse(
-+ _export_exists(iapi.get_iscsi_config()['iscsi'], tcase.volume))
+ @mock_volume_types
+ def test_volume_revert(self):
+ vol_id = 'rev1'
diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
-index edafb7b21..2d7e25ced 100644
+index a30e918cb..021e72322 100644
--- a/cinder/volume/drivers/storpool.py
+++ b/cinder/volume/drivers/storpool.py
-@@ -17,6 +17,7 @@
+@@ -15,6 +15,7 @@
- import configparser
- import errno
+ """StorPool block device driver"""
+
+import fnmatch
- import http.client
- import json
- import os
-@@ -28,7 +29,10 @@ import time
- from oslo_config import cfg
- from oslo_log import log as logging
- from oslo_utils import excutils
-+from oslo_utils import netutils
- from oslo_utils import units
-+from oslo_utils import uuidutils
-+import six
+ import platform
- from cinder.common import constants
- from cinder import context
-@@ -46,6 +50,31 @@ DEV_STORPOOL_BYID = pathlib.Path('/dev/storpool-byid')
+ from oslo_config import cfg
+@@ -43,6 +44,31 @@ if storpool:
storpool_opts = [
@@ -597,69 +595,15 @@
cfg.StrOpt('storpool_template',
default=None,
help='The StorPool template for volumes with no type.'),
-@@ -61,6 +90,28 @@ CONF = cfg.CONF
- CONF.register_opts(storpool_opts, group=configuration.SHARED_CONF_GROUP)
-
-
-+def _extract_cinder_ids(urls):
-+ ids = []
-+ for url in urls:
-+ # The url can also be None and a TypeError is raised
-+ # TypeError: a bytes-like object is required, not 'str'
-+ if not url:
-+ continue
-+ parts = netutils.urlsplit(url)
-+ if parts.scheme == 'cinder':
-+ if parts.path:
-+ vol_id = parts.path.split('/')[-1]
-+ else:
-+ vol_id = parts.netloc
-+ if uuidutils.is_uuid_like(vol_id):
-+ ids.append(vol_id)
-+ else:
-+ LOG.debug("Ignoring malformed image location uri "
-+ "'%(url)s'", {'url': url})
-+
-+ return ids
-+
-+
- class StorPoolConfigurationInvalid(exception.CinderException):
- message = _("Invalid parameter %(param)s in the %(section)s section "
- "of the /etc/storpool.conf file: %(error)s")
-@@ -233,6 +284,12 @@ class StorPoolAPI:
- self._api_call(
- 'POST', f'/ctrl/1.0/MultiCluster/SnapshotDelete/{snapshot}')
-
-+ def get_iscsi_config(self):
-+ return self._api_call('GET', '/ctrl/1.0/iSCSIConfig')
-+
-+ def post_iscsi_config(self, data):
-+ return self._api_call('POST', '/ctrl/1.0/iSCSIConfig', data)
-+
-
- @interface.volumedriver
- class StorPoolDriver(driver.VolumeDriver):
-@@ -267,9 +324,10 @@ class StorPoolDriver(driver.VolumeDriver):
- 2.1.0 - Use a new in-tree API client to communicate with the
- StorPool API instead of packages `storpool` and
- `storpool.spopenstack`
-+ 2.2.0 - Add iSCSI export support.
- """
-
-- VERSION = '2.1.0'
-+ VERSION = '2.2.0'
- CI_WIKI_NAME = 'StorPool_distributed_storage_CI'
-
- def __init__(self, *args, **kwargs):
-@@ -280,6 +338,7 @@ class StorPoolDriver(driver.VolumeDriver):
+@@ -109,6 +135,7 @@ class StorPoolDriver(driver.VolumeDriver):
+ self._ourId = None
self._ourIdInt = None
- self._sp_api = None
- self._volume_prefix = None
+ self._attach = None
+ self._use_iscsi = None
@staticmethod
def get_driver_options():
-@@ -354,10 +413,326 @@ class StorPoolDriver(driver.VolumeDriver):
+@@ -177,10 +204,326 @@ class StorPoolDriver(driver.VolumeDriver):
raise StorPoolConfigurationInvalid(
section=hostname, param='SP_OURID', error=e)
@@ -722,11 +666,11 @@
+ will be needed to create, ensure, or remove the iSCSI export of
+ the specified volume to the specified initiator.
+ """
-+ cfg = self._sp_api.get_iscsi_config()
++ cfg = self._attach.api().iSCSIConfig()
+
+ pg_name = self.configuration.iscsi_portal_group
+ pg_found = [
-+ pg for pg in cfg['iscsi']['portalGroups'].values() if pg['name'] == pg_name
++ pg for pg in cfg.iscsi.portalGroups.values() if pg.name == pg_name
+ ]
+ if not pg_found:
+ raise Exception('StorPool Cinder iSCSI configuration error: '
@@ -735,7 +679,7 @@
+
+ # Do we know about this initiator?
+ i_found = [
-+ init for init in cfg['iscsi']['initiators'].values() if init['name'] == iqn
++ init for init in cfg.iscsi.initiators.values() if init.name == iqn
+ ]
+ if i_found:
+ initiator = i_found[0]
@@ -743,9 +687,9 @@
+ initiator = None
+
+ # Is this volume already being exported?
-+ volname = self._os_to_sp_volume_name(volume_id)
++ volname = self._attach.volumeName(volume_id)
+ t_found = [
-+ tgt for tgt in cfg['iscsi']['targets'].values() if tgt['volume'] == volname
++ tgt for tgt in cfg.iscsi.targets.values() if tgt.volume == volname
+ ]
+ if t_found:
+ target = t_found[0]
@@ -756,8 +700,8 @@
+ export = None
+ if initiator is not None and target is not None:
+ e_found = [
-+ exp for exp in initiator['exports']
-+ if exp['portalGroup'] == pg['name'] and exp['target'] == target['name']
++ exp for exp in initiator.exports
++ if exp.portalGroup == pg.name and exp.target == target.name
+ ]
+ if e_found:
+ export = e_found[0]
@@ -804,7 +748,7 @@
+ LOG.info('Creating a StorPool iSCSI initiator '
+ 'for "{host}s" ({iqn}s)',
+ {'host': connector['host'], 'iqn': iqn})
-+ self._sp_api.post_iscsi_config({
++ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'createInitiator': {
@@ -831,7 +775,7 @@
+ 'vol_id': volume['id'],
+ }
+ )
-+ self._sp_api.post_iscsi_config({
++ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'createTarget': {
@@ -852,14 +796,14 @@
+ 'vol_id': volume['id'],
+ 'host': connector['host'],
+ 'iqn': iqn,
-+ 'pg': cfg['pg']['name']
++ 'pg': cfg['pg'].name
+ })
-+ self._sp_api.post_iscsi_config({
++ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'export': {
+ 'initiator': iqn,
-+ 'portalGroup': cfg['pg']['name'],
++ 'portalGroup': cfg['pg'].name,
+ 'volumeName': cfg['volume_name'],
+ },
+ },
@@ -867,10 +811,10 @@
+ })
+
+ target_portals = [
-+ "{addr}:3260".format(addr=net['address'])
-+ for net in cfg['pg']['networks']
++ "{addr}:3260".format(addr=net.address)
++ for net in cfg['pg'].networks
+ ]
-+ target_iqns = [cfg['target']['name']] * len(target_portals)
++ target_iqns = [cfg['target'].name] * len(target_portals)
+ target_luns = [0] * len(target_portals)
+ if connector.get('multipath', False):
+ multipath_settings = {
@@ -925,32 +869,32 @@
+ 'vol_id': volume['id'],
+ 'host': connector['host'],
+ 'iqn': connector['initiator'],
-+ 'pg': cfg['pg']['name'],
++ 'pg': cfg['pg'].name,
+ })
+ try:
-+ self._sp_api.post_iscsi_config({
++ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'exportDelete': {
-+ 'initiator': cfg['initiator']['name'],
-+ 'portalGroup': cfg['pg']['name'],
++ 'initiator': cfg['initiator'].name,
++ 'portalGroup': cfg['pg'].name,
+ 'volumeName': cfg['volume_name'],
+ },
+ },
+ ]
+ })
-+ except StorPoolAPIError as e:
++ except spapi.ApiError as e:
+ if e.name not in ('objectExists', 'objectDoesNotExist'):
+ raise
+ LOG.info('Looks like somebody beat us to it')
+
+ if cfg['target'] is not None:
+ last = True
-+ for initiator in cfg['cfg']['iscsi']['initiators'].values():
-+ if initiator['name'] == cfg['initiator']['name']:
++ for initiator in cfg['cfg'].iscsi.initiators.values():
++ if initiator.name == cfg['initiator'].name:
+ continue
-+ for exp in initiator['exports']:
-+ if exp['target'] == cfg['target']['name']:
++ for exp in initiator.exports:
++ if exp.target == cfg['target'].name:
+ last = False
+ break
+ if not last:
@@ -966,7 +910,7 @@
+ }
+ )
+ try:
-+ self._sp_api.post_iscsi_config({
++ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'deleteTarget': {
@@ -975,7 +919,7 @@
+ },
+ ]
+ })
-+ except StorPoolAPIError as e:
++ except spapi.ApiError as e:
+ if e.name not in ('objectDoesNotExist', 'invalidParam'):
+ raise
+ LOG.info('Looks like somebody beat us to it')
@@ -986,7 +930,7 @@
return {'driver_volume_type': 'storpool',
'data': {
'client_id': self._storpool_client_id(connector),
-@@ -366,6 +741,9 @@ class StorPoolDriver(driver.VolumeDriver):
+@@ -189,6 +532,9 @@ class StorPoolDriver(driver.VolumeDriver):
}}
def terminate_connection(self, volume, connector, **kwargs):
@@ -996,7 +940,7 @@
pass
def create_snapshot(self, snapshot):
-@@ -467,11 +845,20 @@ class StorPoolDriver(driver.VolumeDriver):
+@@ -293,11 +639,20 @@ class StorPoolDriver(driver.VolumeDriver):
)
def create_export(self, context, volume, connector):
@@ -1016,9 +960,9 @@
+ return super()._attach_volume(context, volume, properties, remote)
+
def delete_volume(self, volume):
- name = self._os_to_sp_volume_name(volume['id'])
+ name = self._attach.volumeName(volume['id'])
try:
-@@ -505,6 +892,17 @@ class StorPoolDriver(driver.VolumeDriver):
+@@ -334,6 +689,17 @@ class StorPoolDriver(driver.VolumeDriver):
LOG.error("StorPoolDriver API initialization failed: %s", e)
raise
@@ -1035,8 +979,8 @@
+
def _update_volume_stats(self):
try:
- dl = self._sp_api.disks_list()
-@@ -530,7 +928,7 @@ class StorPoolDriver(driver.VolumeDriver):
+ dl = self._attach.api().disksList()
+@@ -359,7 +725,7 @@ class StorPoolDriver(driver.VolumeDriver):
'total_capacity_gb': total / units.Gi,
'free_capacity_gb': free / units.Gi,
'reserved_percentage': 0,
@@ -1045,7 +989,7 @@
'QoS_support': False,
'thick_provisioning_support': False,
'thin_provisioning_support': True,
-@@ -549,7 +947,9 @@ class StorPoolDriver(driver.VolumeDriver):
+@@ -378,7 +744,9 @@ class StorPoolDriver(driver.VolumeDriver):
'volume_backend_name') or 'storpool',
'vendor_name': 'StorPool',
'driver_version': self.VERSION,
@@ -1057,7 +1001,7 @@
'clone_across_pools': True,
'sparse_copy_volume': True,
diff --git a/doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst b/doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst
-index d2c5895a9..1ba0d2862 100644
+index d2c5895a9..1f3d46cce 100644
--- a/doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst
@@ -19,12 +19,15 @@ Prerequisites
@@ -1080,7 +1024,7 @@
* All nodes that need to access the StorPool API (the compute nodes and
the node running the ``cinder-volume`` service) must have the following
-@@ -34,6 +37,33 @@ Prerequisites
+@@ -34,6 +37,29 @@ Prerequisites
* the storpool Python bindings package
* the storpool.spopenstack Python helper package
@@ -1107,14 +1051,10 @@
+so that only the ``cinder-volume`` service will use the iSCSI protocol when
+attaching volumes and snapshots to transfer data to and from Glance images.
+
-+Multiattach support for StorPool is only enabled if iSCSI is used:
-+``iscsi_export_to`` is set to ``*``, that is, when all StorPool volumes
-+will be exported via iSCSI to all initiators.
-+
Configuring the StorPool volume driver
--------------------------------------
-@@ -55,6 +85,32 @@ volume backend definition) and per volume type:
+@@ -55,6 +81,32 @@ volume backend definition) and per volume type:
with the default placement constraints for the StorPool cluster.
The default value for the chain replication is 3.
@@ -1135,7 +1075,7 @@
+- ``iscsi_cinder_volume``: if enabled, even if the ``iscsi_export_to`` option
+ has its default empty value, the ``cinder-volume`` service will use iSCSI
+ to attach the volumes and snapshots for transferring data to and from
-+ Glance images if Glance is configured to use the Cinder glance_store.
++ Glance images.
+
+- ``iscsi_learn_initiator_iqns``: if enabled, the StorPool Cinder driver will
+ automatically use the StorPool API to create definitions for new initiators
@@ -1147,6 +1087,25 @@
Using the StorPool volume driver
--------------------------------
+diff --git a/releasenotes/notes/storpool-iscsi-cefcfe590a07c5c7.yaml b/releasenotes/notes/storpool-iscsi-cefcfe590a07c5c7.yaml
+new file mode 100644
+index 000000000..edf46d298
+--- /dev/null
++++ b/releasenotes/notes/storpool-iscsi-cefcfe590a07c5c7.yaml
+@@ -0,0 +1,13 @@
++---
++features:
++ - |
++ StorPool driver: Added support for exporting the StorPool-backed volumes
++ using the iSCSI protocol, so that the Cinder volume service and/or
++ the Nova or Glance consumers do not need to have the StorPool block
++ device third-party service installed. See the StorPool driver section in
++ the Cinder documentation for more information on the ``iscsi_export_to``,
++ ``iscsi_portal_group``, ``iscsi_cinder_volume``, and
++ ``iscsi_learn_initiator_iqns`` options.
++ Note that multiattach support for StorPool is now only enabled if
++ ``iscsi_export_to`` is set to ``*``, that is, all StorPool volumes will be
++ exported via iSCSI to all initiators.
--
2.43.0