From 2acfdddf0794754aa0e32a56800e77387f75ce38 Mon Sep 17 00:00:00 2001
From: Peter Penchev <openstack-dev@storpool.com>
Date: Mon, 12 Mar 2018 12:00:10 +0200
Subject: [PATCH] Add iSCSI export support to the StorPool driver
Add four new driver options:
- storpool_iscsi_cinder_volume: use StorPool iSCSI attachments whenever
the cinder-volume service needs to attach a volume to the controller,
e.g. for copying an image to a volume or vice versa
- storpool_iscsi_export_to:
- an empty string to use the StorPool native protocol for exporting
volumes
- the string "*" to always use iSCSI for exporting volumes
- an experimental, not fully supported list of IQN patterns to export
volumes to using iSCSI; this results in a Cinder driver that exports
different volumes using different storage protocols
- storpool_iscsi_portal_group: the name of the iSCSI portal group
defined in the StorPool configuration to use for these exports
- storpool_iscsi_learn_initiator_iqns: automatically create StorPool
configuration records for an initiator when a volume is first exported
to it
When exporting volumes via iSCSI, report the storage protocol as
"iSCSI".
Change-Id: I9de64306e0e6976268df782053b0651dd1cca96f
---
.../unit/volume/drivers/test_storpool.py | 521 +++++++++++++++++-
cinder/volume/drivers/storpool.py | 379 ++++++++++++-
.../drivers/storpool-volume-driver.rst | 68 ++-
.../storpool-iscsi-cefcfe590a07c5c7.yaml | 15 +
4 files changed, 972 insertions(+), 11 deletions(-)
create mode 100644 releasenotes/notes/storpool-iscsi-cefcfe590a07c5c7.yaml
diff --git a/cinder/tests/unit/volume/drivers/test_storpool.py b/cinder/tests/unit/volume/drivers/test_storpool.py
index 44707d0b8..d6347e1d5 100644
--- a/cinder/tests/unit/volume/drivers/test_storpool.py
+++ b/cinder/tests/unit/volume/drivers/test_storpool.py
@@ -14,14 +14,24 @@
# under the License.
+from __future__ import annotations
+
+import dataclasses
import itertools
import re
import sys
+from typing import Any, NamedTuple, TYPE_CHECKING # noqa: H301
from unittest import mock
import ddt
from oslo_utils import units
+if TYPE_CHECKING:
+ if sys.version_info >= (3, 11):
+ from typing import Self
+ else:
+ from typing_extensions import Self
+
fakeStorPool = mock.Mock()
fakeStorPool.spopenstack = mock.Mock()
@@ -31,6 +41,7 @@ fakeStorPool.sptypes = mock.Mock()
sys.modules['storpool'] = fakeStorPool
+from cinder.common import constants
from cinder import exception
from cinder.tests.unit import fake_constants
from cinder.tests.unit import test
@@ -38,6 +49,13 @@ from cinder.volume import configuration as conf
from cinder.volume.drivers import storpool as driver
+_ISCSI_IQN_OURS = 'beleriand'
+_ISCSI_IQN_OTHER = 'rohan'
+_ISCSI_IQN_THIRD = 'gondor'
+_ISCSI_PAT_OTHER = 'roh*'
+_ISCSI_PAT_BOTH = '*riand roh*'
+_ISCSI_PORTAL_GROUP = 'openstack_pg'
+
volume_types = {
fake_constants.VOLUME_TYPE_ID: {},
fake_constants.VOLUME_TYPE2_ID: {'storpool_template': 'ssd'},
@@ -71,6 +89,10 @@ def snapshotName(vtype, vid):
return 'os--snap--{t}--{id}'.format(t=vtype, id=vid)
+def targetName(vid):
+ return 'iqn.2012-11.storpool:{id}'.format(id=vid)
+
+
class MockDisk(object):
def __init__(self, diskId):
self.id = diskId
@@ -195,6 +217,315 @@ def MockVolumeUpdateDesc(size):
return {'size': size}
+@dataclasses.dataclass(frozen=True)
+class MockIscsiNetwork:
+ """Mock a StorPool IP CIDR network definition (partially)."""
+
+ address: str
+
+
+@dataclasses.dataclass(frozen=True)
+class MockIscsiPortalGroup:
+ """Mock a StorPool iSCSI portal group definition (partially)."""
+
+ name: str
+ networks: list[MockIscsiNetwork]
+
+
+@dataclasses.dataclass(frozen=True)
+class MockIscsiExport:
+ """Mock a StorPool iSCSI exported volume/target definition."""
+
+ portalGroup: str
+ target: str
+
+
+@dataclasses.dataclass(frozen=True)
+class MockIscsiInitiator:
+ """Mock a StorPool iSCSI initiator definition."""
+
+ name: str
+ exports: list[MockIscsiExport]
+
+
+@dataclasses.dataclass(frozen=True)
+class MockIscsiTarget:
+ """Mock a StorPool iSCSI volume-to-target mapping definition."""
+
+ name: str
+ volume: str
+
+
+class IscsiTestCase(NamedTuple):
+ """A single test case for the iSCSI config and export methods."""
+
+ initiator: str | None
+ volume: str | None
+ exported: bool
+ commands_count: int
+
+
+@dataclasses.dataclass(frozen=True)
+class MockIscsiConfig:
+ """Mock the structure returned by the "get current config" query."""
+
+ portalGroups: dict[str, MockIscsiPortalGroup]
+ initiators: dict[str, MockIscsiInitiator]
+ targets: dict[str, MockIscsiTarget]
+
+ @classmethod
+ def build(cls, tcase: IscsiTestCase) -> Self:
+ """Build a test config structure."""
+ initiators = {
+ '0': MockIscsiInitiator(name=_ISCSI_IQN_OTHER, exports=[]),
+ }
+ if tcase.initiator is not None:
+ initiators['1'] = MockIscsiInitiator(
+ name=tcase.initiator,
+ exports=(
+ [
+ MockIscsiExport(
+ portalGroup=_ISCSI_PORTAL_GROUP,
+ target=targetName(tcase.volume),
+ ),
+ ]
+ if tcase.exported
+ else []
+ ),
+ )
+
+ targets = {
+ '0': MockIscsiTarget(
+ name=targetName(fake_constants.VOLUME2_ID),
+ volume=volumeName(fake_constants.VOLUME2_ID),
+ ),
+ }
+ if tcase.volume is not None:
+ targets['1'] = MockIscsiTarget(
+ name=targetName(tcase.volume),
+ volume=volumeName(tcase.volume),
+ )
+
+ return cls(
+ portalGroups={
+ '0': MockIscsiPortalGroup(
+ name=_ISCSI_PORTAL_GROUP + '-not',
+ networks=[],
+ ),
+ '1': MockIscsiPortalGroup(
+ name=_ISCSI_PORTAL_GROUP,
+ networks=[
+ MockIscsiNetwork(address="192.0.2.0"),
+ MockIscsiNetwork(address="195.51.100.0"),
+ ],
+ ),
+ },
+ initiators=initiators,
+ targets=targets,
+ )
+
+
+@dataclasses.dataclass(frozen=True)
+class MockIscsiConfigTop:
+ """Mock the top level of the "get the iSCSI configuration" response."""
+
+ iscsi: MockIscsiConfig
+
+
+class MockIscsiAPI:
+ """Mock only the iSCSI-related calls of the StorPool API bindings."""
+
+ _asrt: test.TestCase
+ _configs: list[MockIscsiConfig]
+
+ def __init__(
+ self,
+ configs: list[MockIscsiConfig],
+ asrt: test.TestCase,
+ ) -> None:
+ """Store the reference to the list of iSCSI config objects."""
+ self._asrt = asrt
+ self._configs = configs
+
+ def iSCSIConfig(self) -> MockIscsiConfigTop:
+ """Return the last version of the iSCSI configuration."""
+ return MockIscsiConfigTop(iscsi=self._configs[-1])
+
+ def _handle_export(
+ self,
+ cfg: MockIscsiConfig, cmd: dict[str, Any],
+ ) -> MockIscsiConfig:
+ """Add an export for an initiator."""
+ self._asrt.assertDictEqual(
+ cmd,
+ {
+ 'initiator': _ISCSI_IQN_OURS,
+ 'portalGroup': _ISCSI_PORTAL_GROUP,
+ 'volumeName': volumeName(fake_constants.VOLUME_ID),
+ },
+ )
+ self._asrt.assertEqual(cfg.initiators['1'].name, cmd['initiator'])
+ self._asrt.assertListEqual(cfg.initiators['1'].exports, [])
+
+ return dataclasses.replace(
+ cfg,
+ initiators={
+ **cfg.initiators,
+ '1': dataclasses.replace(
+ cfg.initiators['1'],
+ exports=[
+ MockIscsiExport(
+ portalGroup=cmd['portalGroup'],
+ target=targetName(fake_constants.VOLUME_ID),
+ ),
+ ],
+ ),
+ },
+ )
+
+ def _handle_delete_export(
+ self,
+ cfg: MockIscsiConfig,
+ cmd: dict[str, Any],
+ ) -> MockIscsiConfig:
+ """Delete an export for an initiator."""
+ self._asrt.assertDictEqual(
+ cmd,
+ {
+ 'initiator': _ISCSI_IQN_OURS,
+ 'portalGroup': _ISCSI_PORTAL_GROUP,
+ 'volumeName': volumeName(fake_constants.VOLUME_ID),
+ },
+ )
+ self._asrt.assertEqual(cfg.initiators['1'].name, cmd['initiator'])
+ self._asrt.assertListEqual(
+ cfg.initiators['1'].exports,
+ [MockIscsiExport(portalGroup=_ISCSI_PORTAL_GROUP,
+ target=cfg.targets['1'].name)])
+
+        # Copy the dict so that earlier config snapshots are not mutated.
+        updated_initiators = dict(cfg.initiators)
+ del updated_initiators['1']
+ return dataclasses.replace(cfg, initiators=updated_initiators)
+
+ def _handle_create_initiator(
+ self,
+ cfg: MockIscsiConfig,
+ cmd: dict[str, Any],
+ ) -> MockIscsiConfig:
+ """Add a whole new initiator."""
+ self._asrt.assertDictEqual(
+ cmd,
+ {
+ 'name': _ISCSI_IQN_OURS,
+ 'username': '',
+ 'secret': '',
+ },
+ )
+ self._asrt.assertNotIn(
+ cmd['name'],
+ [init.name for init in cfg.initiators.values()],
+ )
+ self._asrt.assertListEqual(sorted(cfg.initiators), ['0'])
+
+ return dataclasses.replace(
+ cfg,
+ initiators={
+ **cfg.initiators,
+ '1': MockIscsiInitiator(name=cmd['name'], exports=[]),
+ },
+ )
+
+ def _handle_create_target(
+ self,
+ cfg: MockIscsiConfig,
+ cmd: dict[str, Any],
+ ) -> MockIscsiConfig:
+ """Add a target for a volume so that it may be exported."""
+ self._asrt.assertDictEqual(
+ cmd,
+ {'volumeName': volumeName(fake_constants.VOLUME_ID)},
+ )
+ self._asrt.assertListEqual(sorted(cfg.targets), ['0'])
+ return dataclasses.replace(
+ cfg,
+ targets={
+ **cfg.targets,
+ '1': MockIscsiTarget(
+ name=targetName(fake_constants.VOLUME_ID),
+ volume=volumeName(fake_constants.VOLUME_ID),
+ ),
+ },
+ )
+
+ def _handle_delete_target(
+ self,
+ cfg: MockIscsiConfig,
+ cmd: dict[str, Any]
+ ) -> MockIscsiConfig:
+ """Remove a target for a volume."""
+ self._asrt.assertDictEqual(
+ cmd,
+ {'volumeName': volumeName(fake_constants.VOLUME_ID)},
+ )
+
+ self._asrt.assertListEqual(sorted(cfg.targets), ['0', '1'])
+        # Copy the dict so that earlier config snapshots are not mutated.
+        updated_targets = dict(cfg.targets)
+ del updated_targets['1']
+ return dataclasses.replace(cfg, targets=updated_targets)
+
+ def _handle_initiator_add_network(
+ self,
+ cfg: MockIscsiConfig,
+ cmd: dict[str, Any],
+ ) -> MockIscsiConfig:
+ """Add a network that an initiator is allowed to log in from."""
+ self._asrt.assertDictEqual(
+ cmd,
+ {
+ 'initiator': _ISCSI_IQN_OURS,
+ 'net': '0.0.0.0/0',
+ },
+ )
+ return dataclasses.replace(cfg)
+
+ _CMD_HANDLERS = {
+ 'createInitiator': _handle_create_initiator,
+ 'createTarget': _handle_create_target,
+ 'deleteTarget': _handle_delete_target,
+ 'export': _handle_export,
+ 'exportDelete': _handle_delete_export,
+ 'initiatorAddNetwork': _handle_initiator_add_network,
+ }
+
+ def iSCSIConfigChange(
+ self,
+ commands: dict[str, list[dict[str, dict[str, Any]]]],
+ ) -> None:
+ """Apply the requested changes to the iSCSI configuration.
+
+ This method adds a new config object to the configs list,
+ making a shallow copy of the last one and applying the changes
+ specified in the list of commands.
+ """
+ self._asrt.assertListEqual(sorted(commands), ['commands'])
+ self._asrt.assertGreater(len(commands['commands']), 0)
+ for cmd in commands['commands']:
+ keys = sorted(cmd.keys())
+ cmd_name = keys[0]
+ self._asrt.assertListEqual(keys, [cmd_name])
+ handler = self._CMD_HANDLERS[cmd_name]
+ new_cfg = handler(self, self._configs[-1], cmd[cmd_name])
+ self._configs.append(new_cfg)
+
+
+_ISCSI_TEST_CASES = [
+ IscsiTestCase(None, None, False, 4),
+ IscsiTestCase(_ISCSI_IQN_OURS, None, False, 2),
+ IscsiTestCase(_ISCSI_IQN_OURS, fake_constants.VOLUME_ID, False, 1),
+ IscsiTestCase(_ISCSI_IQN_OURS, fake_constants.VOLUME_ID, True, 0),
+]
+
+
def MockSPConfig(section = 's01'):
res = {}
m = re.match('^s0*([A-Za-z0-9]+)$', section)
@@ -237,7 +568,15 @@ class StorPoolTestCase(test.TestCase):
self.cfg.volume_backend_name = 'storpool_test'
self.cfg.storpool_template = None
self.cfg.storpool_replication = 3
+ self.cfg.storpool_iscsi_cinder_volume = False
+ self.cfg.storpool_iscsi_export_to = ''
+ self.cfg.storpool_iscsi_learn_initiator_iqns = True
+ self.cfg.storpool_iscsi_portal_group = _ISCSI_PORTAL_GROUP
+ self._setup_test_driver()
+
+ def _setup_test_driver(self):
+ """Initialize a StorPool driver as per the current configuration."""
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
@@ -246,7 +585,7 @@ class StorPoolTestCase(test.TestCase):
self.driver.check_for_setup_error()
@ddt.data(
- (5, TypeError),
+ (5, (TypeError, AttributeError)),
({'no-host': None}, KeyError),
({'host': 'sbad'}, driver.StorPoolConfigurationInvalid),
({'host': 's01'}, None),
@@ -262,7 +601,7 @@ class StorPoolTestCase(test.TestCase):
conn)
@ddt.data(
- (5, TypeError),
+ (5, (TypeError, AttributeError)),
({'no-host': None}, KeyError),
({'host': 'sbad'}, driver.StorPoolConfigurationInvalid),
)
@@ -301,7 +640,7 @@ class StorPoolTestCase(test.TestCase):
self.assertEqual(21, pool['total_capacity_gb'])
self.assertEqual(5, int(pool['free_capacity_gb']))
- self.assertTrue(pool['multiattach'])
+ self.assertFalse(pool['multiattach'])
self.assertFalse(pool['QoS_support'])
self.assertFalse(pool['thick_provisioning_support'])
self.assertTrue(pool['thin_provisioning_support'])
@@ -720,3 +1059,179 @@ class StorPoolTestCase(test.TestCase):
'No such volume',
self.driver.revert_to_snapshot, None,
{'id': vol_id}, {'id': snap_id})
+
+ @ddt.data(
+ # The default values
+ ('', False, constants.STORPOOL, _ISCSI_IQN_OURS, False),
+
+ # Export to all
+ ('*', True, constants.ISCSI, _ISCSI_IQN_OURS, True),
+        ('*', True, constants.ISCSI, _ISCSI_IQN_OTHER, True),
+
+ # Only export to the controller
+ ('', False, constants.STORPOOL, _ISCSI_IQN_OURS, False),
+
+ # Some of the not-fully-supported pattern lists
+ (_ISCSI_PAT_OTHER, False, constants.STORPOOL, _ISCSI_IQN_OURS, False),
+ (_ISCSI_PAT_OTHER, False, constants.STORPOOL, _ISCSI_IQN_OTHER, True),
+ (_ISCSI_PAT_BOTH, False, constants.STORPOOL, _ISCSI_IQN_OURS, True),
+ (_ISCSI_PAT_BOTH, False, constants.STORPOOL, _ISCSI_IQN_OTHER, True),
+ )
+ @ddt.unpack
+ def test_wants_iscsi(self, storpool_iscsi_export_to, use_iscsi,
+ storage_protocol, hostname, expected):
+ """Check the "should this export use iSCSI?" detection."""
+ self.cfg.storpool_iscsi_export_to = storpool_iscsi_export_to
+ self._setup_test_driver()
+ self.assertEqual(self.driver._use_iscsi, use_iscsi)
+
+ # Make sure the driver reports the correct protocol in the stats
+ self.driver._update_volume_stats()
+ self.assertEqual(self.driver._stats["vendor_name"], "StorPool")
+ self.assertEqual(self.driver._stats["storage_protocol"],
+ storage_protocol)
+
+ def check(conn, forced, expected):
+ """Pass partially or completely valid connector info."""
+ for initiator in (None, hostname):
+ for host in (None, _ISCSI_IQN_THIRD):
+ self.assertEqual(
+ self.driver._connector_wants_iscsi({
+ "host": host,
+ "initiator": initiator,
+ **conn,
+ }),
+ expected if initiator is not None and host is not None
+ else forced)
+
+ # If storpool_iscsi_cinder_volume is set and this is the controller,
+ # then yes.
+ check({"storpool_wants_iscsi": True}, True, True)
+
+ # If storpool_iscsi_cinder_volume is not set or this is not the
+ # controller, then look at the specified expected value.
+ check({"storpool_wants_iscsi": False}, use_iscsi, expected)
+ check({}, use_iscsi, expected)
+
+ def _validate_iscsi_config(
+ self,
+ cfg: MockIscsiConfig,
+ res: dict[str, Any],
+ tcase: IscsiTestCase,
+ ) -> None:
+ """Make sure the returned structure makes sense."""
+ initiator = res['initiator']
+ cfg_initiator = cfg.initiators.get('1')
+
+ self.assertIs(res['cfg'].iscsi, cfg)
+ self.assertEqual(res['pg'].name, _ISCSI_PORTAL_GROUP)
+
+ if tcase.initiator is None:
+ self.assertIsNone(initiator)
+ else:
+ self.assertIsNotNone(initiator)
+ self.assertEqual(initiator, cfg_initiator)
+
+ if tcase.volume is None:
+ self.assertIsNone(res['target'])
+ else:
+ self.assertIsNotNone(res['target'])
+ self.assertEqual(res['target'], cfg.targets.get('1'))
+
+ if tcase.initiator is None:
+ self.assertIsNone(cfg_initiator)
+ self.assertIsNone(res['export'])
+ else:
+ self.assertIsNotNone(cfg_initiator)
+ if tcase.exported:
+ self.assertIsNotNone(res['export'])
+ self.assertEqual(res['export'], cfg_initiator.exports[0])
+ else:
+ self.assertIsNone(res['export'])
+
+ @ddt.data(*_ISCSI_TEST_CASES)
+ def test_iscsi_get_config(self, tcase: IscsiTestCase) -> None:
+ """Make sure the StorPool iSCSI configuration is parsed correctly."""
+ cfg_orig = MockIscsiConfig.build(tcase)
+ configs = [cfg_orig]
+ iapi = MockIscsiAPI(configs, self)
+ with mock.patch.object(self.driver._attach, 'api', new=lambda: iapi):
+ res = self.driver._get_iscsi_config(
+ _ISCSI_IQN_OURS,
+ fake_constants.VOLUME_ID,
+ )
+
+ self._validate_iscsi_config(cfg_orig, res, tcase)
+
+ @ddt.data(*_ISCSI_TEST_CASES)
+ def test_iscsi_create_export(self, tcase: IscsiTestCase) -> None:
+ """Make sure _create_iscsi_export() makes the right API calls."""
+ cfg_orig = MockIscsiConfig.build(tcase)
+ configs = [cfg_orig]
+ iapi = MockIscsiAPI(configs, self)
+ with mock.patch.object(self.driver._attach, 'api', new=lambda: iapi):
+ self.driver._create_iscsi_export(
+ {
+ 'id': fake_constants.VOLUME_ID,
+ 'display_name': fake_constants.VOLUME_NAME,
+ },
+ {
+ # Yeah, okay, so we cheat a little bit here...
+ 'host': _ISCSI_IQN_OURS + '.hostname',
+ 'initiator': _ISCSI_IQN_OURS,
+ },
+ )
+
+ self.assertEqual(len(configs), tcase.commands_count + 1)
+ cfg_final = configs[-1]
+ self.assertEqual(cfg_final.initiators['1'].name, _ISCSI_IQN_OURS)
+ self.assertEqual(
+ cfg_final.initiators['1'].exports[0].target,
+ targetName(fake_constants.VOLUME_ID),
+ )
+ self.assertEqual(
+ cfg_final.targets['1'].volume,
+ volumeName(fake_constants.VOLUME_ID),
+ )
+
+ @ddt.data(*_ISCSI_TEST_CASES)
+ def test_remove_iscsi_export(self, tcase: IscsiTestCase):
+ cfg_orig = MockIscsiConfig.build(tcase)
+ configs = [cfg_orig]
+ iapi = MockIscsiAPI(configs, self)
+
+ def _target_exists(cfg: MockIscsiConfig, volume: str) -> bool:
+ for name, target in cfg.targets.items():
+ if target.volume == volumeName(volume):
+ return True
+ return False
+
+ def _export_exists(cfg: MockIscsiConfig, volume: str) -> bool:
+ for name, initiator in cfg.initiators.items():
+ for export in initiator.exports:
+ if export.target == targetName(volume):
+ return True
+ return False
+
+ if tcase.exported:
+ self.assertTrue(
+ _target_exists(iapi.iSCSIConfig().iscsi, tcase.volume))
+ self.assertTrue(
+ _export_exists(iapi.iSCSIConfig().iscsi, tcase.volume))
+
+ with mock.patch.object(self.driver._attach, 'api', new=lambda: iapi):
+ self.driver._remove_iscsi_export(
+ {
+ 'id': fake_constants.VOLUME_ID,
+ 'display_name': fake_constants.VOLUME_NAME,
+ },
+ {
+ 'host': _ISCSI_IQN_OURS + '.hostname',
+ 'initiator': _ISCSI_IQN_OURS,
+ },
+ )
+
+ self.assertFalse(
+ _target_exists(iapi.iSCSIConfig().iscsi, tcase.volume))
+ self.assertFalse(
+ _export_exists(iapi.iSCSIConfig().iscsi, tcase.volume))
diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
index a8200a7f1..d931200f6 100644
--- a/cinder/volume/drivers/storpool.py
+++ b/cinder/volume/drivers/storpool.py
@@ -15,6 +15,7 @@
"""StorPool block device driver"""
+import fnmatch
import platform
from oslo_config import cfg
@@ -43,6 +44,32 @@ if storpool:
storpool_opts = [
+ cfg.BoolOpt('storpool_iscsi_cinder_volume',
+ default=False,
+ help='Let the cinder-volume service use iSCSI instead of '
+ 'the StorPool block device driver for accessing '
+ 'StorPool volumes, e.g. when creating a volume from '
+ 'an image or vice versa.'),
+ cfg.StrOpt('storpool_iscsi_export_to',
+ default='',
+ help='Whether to export volumes using iSCSI. '
+ 'An empty string (the default) makes the driver export '
+ 'all volumes using the StorPool native network protocol. '
+ 'The value "*" makes the driver export all volumes using '
+ 'iSCSI (see the Cinder StorPool driver documentation for '
+ 'how this option and ``storpool_iscsi_cinder_volume`` '
+ 'interact). Any other value leads to an experimental '
+ 'not fully supported configuration and is interpreted as '
+ 'a whitespace-separated list of patterns for IQNs for '
+ 'hosts that need volumes to be exported via iSCSI, e.g. '
+ '"iqn.1991-05.com.microsoft:\\*" for Windows hosts.'),
+ cfg.BoolOpt('storpool_iscsi_learn_initiator_iqns',
+ default=True,
+ help='Create a StorPool record for a new initiator as soon as '
+ 'Cinder asks for a volume to be exported to it.'),
+ cfg.StrOpt('storpool_iscsi_portal_group',
+ default=None,
+ help='The portal group to export volumes via iSCSI in.'),
cfg.StrOpt('storpool_template',
default=None,
help='The StorPool template for volumes with no type.'),
@@ -93,9 +120,10 @@ class StorPoolDriver(driver.VolumeDriver):
add ignore_errors to the internal _detach_volume() method
1.2.3 - Advertise some more driver capabilities.
2.0.0 - Implement revert_to_snapshot().
+ 2.1.0 - Add iSCSI export support.
"""
- VERSION = '2.0.0'
+ VERSION = '2.1.0'
CI_WIKI_NAME = 'StorPool_distributed_storage_CI'
def __init__(self, *args, **kwargs):
@@ -105,6 +133,7 @@ class StorPoolDriver(driver.VolumeDriver):
self._ourId = None
self._ourIdInt = None
self._attach = None
+ self._use_iscsi = False
@staticmethod
def get_driver_options():
@@ -159,10 +188,327 @@ class StorPoolDriver(driver.VolumeDriver):
raise StorPoolConfigurationInvalid(
section=hostname, param='SP_OURID', error=e)
+ def _connector_wants_iscsi(self, connector):
+ """Should we do this export via iSCSI?
+
+ Check the configuration to determine whether this connector is
+ expected to provide iSCSI exports as opposed to native StorPool
+ protocol ones. Match the initiator's IQN against the list of
+ patterns supplied in the "storpool_iscsi_export_to" configuration
+ setting.
+ """
+ if connector is None:
+ return False
+ if self._use_iscsi:
+ LOG.debug(' - forcing iSCSI for all exported volumes')
+ return True
+ if connector.get('storpool_wants_iscsi'):
+ LOG.debug(' - forcing iSCSI for the controller')
+ return True
+
+ try:
+ iqn = connector.get('initiator')
+ except Exception:
+ iqn = None
+ try:
+ host = connector.get('host')
+ except Exception:
+ host = None
+ if iqn is None or host is None:
+ LOG.debug(' - this connector certainly does not want iSCSI')
+ return False
+
+ LOG.debug(' - check whether %(host)s (%(iqn)s) wants iSCSI',
+ {
+ 'host': host,
+ 'iqn': iqn,
+ })
+
+ export_to = self.configuration.storpool_iscsi_export_to
+ if export_to is None:
+ return False
+
+ for pat in export_to.split():
+ LOG.debug(' - matching against %(pat)s', {'pat': pat})
+ if fnmatch.fnmatch(iqn, pat):
+ LOG.debug(' - got it!')
+ return True
+ LOG.debug(' - nope')
+ return False
+
def validate_connector(self, connector):
+ if self._connector_wants_iscsi(connector):
+ return True
return self._storpool_client_id(connector) >= 0
+ def _get_iscsi_config(self, iqn, volume_id):
+ """Get the StorPool iSCSI config items pertaining to this volume.
+
+ Find the elements of the StorPool iSCSI configuration tree that
+ will be needed to create, ensure, or remove the iSCSI export of
+ the specified volume to the specified initiator.
+ """
+ cfg = self._attach.api().iSCSIConfig()
+
+ pg_name = self.configuration.storpool_iscsi_portal_group
+ pg_found = [
+ pg for pg in cfg.iscsi.portalGroups.values() if pg.name == pg_name
+ ]
+ if not pg_found:
+ raise Exception('StorPool Cinder iSCSI configuration error: '
+ 'no portal group "{pg}"'.format(pg=pg_name))
+ pg = pg_found[0]
+
+ # Do we know about this initiator?
+ i_found = [
+ init for init in cfg.iscsi.initiators.values() if init.name == iqn
+ ]
+ if i_found:
+ initiator = i_found[0]
+ else:
+ initiator = None
+
+ # Is this volume already being exported?
+ volname = self._attach.volumeName(volume_id)
+ t_found = [
+ tgt for tgt in cfg.iscsi.targets.values() if tgt.volume == volname
+ ]
+ if t_found:
+ target = t_found[0]
+ else:
+ target = None
+
+ # OK, so is this volume being exported to this initiator?
+ export = None
+ if initiator is not None and target is not None:
+ e_found = [
+ exp for exp in initiator.exports
+ if exp.portalGroup == pg.name and exp.target == target.name
+ ]
+ if e_found:
+ export = e_found[0]
+
+ return {
+ 'cfg': cfg,
+ 'pg': pg,
+ 'initiator': initiator,
+ 'target': target,
+ 'export': export,
+ 'volume_name': volname,
+ 'volume_id': volume_id,
+ }
+
+ def _create_iscsi_export(self, volume, connector):
+ """Create (if needed) an iSCSI export for the StorPool volume."""
+ LOG.debug(
+ '_create_iscsi_export() invoked for volume '
+ '"%(vol_name)s" (%(vol_id)s) connector %(connector)s',
+ {
+ 'vol_name': volume['display_name'],
+ 'vol_id': volume['id'],
+ 'connector': connector,
+ }
+ )
+ iqn = connector['initiator']
+ try:
+ cfg = self._get_iscsi_config(iqn, volume['id'])
+ except Exception as exc:
+ LOG.error(
+ 'Could not fetch the iSCSI config: %(exc)s', {'exc': exc}
+ )
+ raise
+
+ if cfg['initiator'] is None:
+ if not (self.configuration.storpool_iscsi_learn_initiator_iqns or
+ self.configuration.storpool_iscsi_cinder_volume and
+ connector.get('storpool_wants_iscsi')):
+ raise Exception('The "{iqn}" initiator IQN for the "{host}" '
+ 'host is not defined in the StorPool '
+ 'configuration.'
+ .format(iqn=iqn, host=connector['host']))
+ else:
+ LOG.info('Creating a StorPool iSCSI initiator '
+                         'for "%(host)s" (%(iqn)s)',
+ {'host': connector['host'], 'iqn': iqn})
+ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'createInitiator': {
+ 'name': iqn,
+ 'username': '',
+ 'secret': '',
+ },
+ },
+ {
+ 'initiatorAddNetwork': {
+ 'initiator': iqn,
+ 'net': '0.0.0.0/0',
+ },
+ },
+ ]
+ })
+
+ if cfg['target'] is None:
+ LOG.info(
+ 'Creating a StorPool iSCSI target '
+ 'for the "%(vol_name)s" volume (%(vol_id)s)',
+ {
+ 'vol_name': volume['display_name'],
+ 'vol_id': volume['id'],
+ }
+ )
+ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'createTarget': {
+ 'volumeName': cfg['volume_name'],
+ },
+ },
+ ]
+ })
+ cfg = self._get_iscsi_config(iqn, volume['id'])
+
+ if cfg['export'] is None:
+ LOG.info('Creating a StorPool iSCSI export '
+                     'for the "%(vol_name)s" volume (%(vol_id)s) '
+                     'to the "%(host)s" initiator (%(iqn)s) '
+                     'in the "%(pg)s" portal group',
+ {
+ 'vol_name': volume['display_name'],
+ 'vol_id': volume['id'],
+ 'host': connector['host'],
+ 'iqn': iqn,
+ 'pg': cfg['pg'].name
+ })
+ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'export': {
+ 'initiator': iqn,
+ 'portalGroup': cfg['pg'].name,
+ 'volumeName': cfg['volume_name'],
+ },
+ },
+ ]
+ })
+
+ target_portals = [
+ "{addr}:3260".format(addr=net.address)
+ for net in cfg['pg'].networks
+ ]
+ target_iqns = [cfg['target'].name] * len(target_portals)
+ target_luns = [0] * len(target_portals)
+ if connector.get('multipath', False):
+ multipath_settings = {
+ 'target_iqns': target_iqns,
+ 'target_portals': target_portals,
+ 'target_luns': target_luns,
+ }
+ else:
+ multipath_settings = {}
+
+ res = {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ **multipath_settings,
+ 'target_discovered': False,
+ 'target_iqn': target_iqns[0],
+ 'target_portal': target_portals[0],
+ 'target_lun': target_luns[0],
+ 'volume_id': volume['id'],
+ 'discard': True,
+ },
+ }
+ LOG.debug('returning %(res)s', {'res': res})
+ return res
+
+ def _remove_iscsi_export(self, volume, connector):
+ """Remove an iSCSI export for the specified StorPool volume."""
+ LOG.debug(
+ '_remove_iscsi_export() invoked for volume '
+ '"%(vol_name)s" (%(vol_id)s) connector %(conn)s',
+ {
+ 'vol_name': volume['display_name'],
+ 'vol_id': volume['id'],
+ 'conn': connector,
+ }
+ )
+ try:
+ cfg = self._get_iscsi_config(connector['initiator'], volume['id'])
+ except Exception as exc:
+ LOG.error(
+ 'Could not fetch the iSCSI config: %(exc)s', {'exc': exc}
+ )
+ raise
+
+ if cfg['export'] is not None:
+ LOG.info('Removing the StorPool iSCSI export '
+ 'for the "%(vol_name)s" volume (%(vol_id)s) '
+ 'to the "%(host)s" initiator (%(iqn)s) '
+ 'in the "%(pg)s" portal group',
+ {
+ 'vol_name': volume['display_name'],
+ 'vol_id': volume['id'],
+ 'host': connector['host'],
+ 'iqn': connector['initiator'],
+ 'pg': cfg['pg'].name,
+ })
+ try:
+ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'exportDelete': {
+ 'initiator': cfg['initiator'].name,
+ 'portalGroup': cfg['pg'].name,
+ 'volumeName': cfg['volume_name'],
+ },
+ },
+ ]
+ })
+ except spapi.ApiError as e:
+ if e.name not in ('objectExists', 'objectDoesNotExist'):
+ raise
+ LOG.info('Looks like somebody beat us to it')
+
+ if cfg['target'] is not None:
+ last = True
+ for initiator in cfg['cfg'].iscsi.initiators.values():
+ if initiator.name == cfg['initiator'].name:
+ continue
+ for exp in initiator.exports:
+ if exp.target == cfg['target'].name:
+ last = False
+ break
+ if not last:
+ break
+
+ if last:
+ LOG.info(
+ 'Removing the StorPool iSCSI target '
+                'for the "%(vol_name)s" volume (%(vol_id)s)',
+ {
+ 'vol_name': volume['display_name'],
+ 'vol_id': volume['id'],
+ }
+ )
+ try:
+ self._attach.api().iSCSIConfigChange({
+ 'commands': [
+ {
+ 'deleteTarget': {
+ 'volumeName': cfg['volume_name'],
+ },
+ },
+ ]
+ })
+ except spapi.ApiError as e:
+ if e.name not in ('objectDoesNotExist', 'invalidParam'):
+ raise
+ LOG.info('Looks like somebody beat us to it')
+
def initialize_connection(self, volume, connector):
+ if self._connector_wants_iscsi(connector):
+ return self._create_iscsi_export(volume, connector)
return {'driver_volume_type': 'storpool',
'data': {
'client_id': self._storpool_client_id(connector),
@@ -171,6 +517,9 @@ class StorPoolDriver(driver.VolumeDriver):
}}
def terminate_connection(self, volume, connector, **kwargs):
+ if self._connector_wants_iscsi(connector):
+ LOG.debug('- removing an iSCSI export')
+ self._remove_iscsi_export(volume, connector)
pass
def create_snapshot(self, snapshot):
@@ -272,11 +621,20 @@ class StorPoolDriver(driver.VolumeDriver):
)
def create_export(self, context, volume, connector):
- pass
+ if self._connector_wants_iscsi(connector):
+ LOG.debug('- creating an iSCSI export')
+ self._create_iscsi_export(volume, connector)
def remove_export(self, context, volume):
pass
+ def _attach_volume(self, context, volume, properties, remote=False):
+ if self.configuration.storpool_iscsi_cinder_volume and not remote:
+ LOG.debug('- adding the "storpool_wants_iscsi" flag')
+ properties['storpool_wants_iscsi'] = True
+
+ return super()._attach_volume(context, volume, properties, remote)
+
def delete_volume(self, volume):
name = self._attach.volumeName(volume['id'])
try:
@@ -313,6 +671,17 @@ class StorPoolDriver(driver.VolumeDriver):
LOG.error("StorPoolDriver API initialization failed: %s", e)
raise
+ export_to = self.configuration.storpool_iscsi_export_to
+ export_to_set = export_to is not None and export_to.split()
+ vol_iscsi = self.configuration.storpool_iscsi_cinder_volume
+ pg_name = self.configuration.storpool_iscsi_portal_group
+ if (export_to_set or vol_iscsi) and pg_name is None:
+            msg = _('The "storpool_iscsi_portal_group" option is required '
+                    'if "storpool_iscsi_export_to" is set or '
+                    '"storpool_iscsi_cinder_volume" is enabled')
+ raise exception.VolumeDriverException(message=msg)
+
+ self._use_iscsi = export_to == "*"
+
def _update_volume_stats(self):
try:
dl = self._attach.api().disksList()
@@ -338,7 +707,7 @@ class StorPoolDriver(driver.VolumeDriver):
'total_capacity_gb': total / units.Gi,
'free_capacity_gb': free / units.Gi,
'reserved_percentage': 0,
- 'multiattach': True,
+ 'multiattach': self._use_iscsi,
'QoS_support': False,
'thick_provisioning_support': False,
'thin_provisioning_support': True,
@@ -357,7 +726,9 @@ class StorPoolDriver(driver.VolumeDriver):
'volume_backend_name') or 'storpool',
'vendor_name': 'StorPool',
'driver_version': self.VERSION,
- 'storage_protocol': constants.STORPOOL,
+ 'storage_protocol': (
+ constants.ISCSI if self._use_iscsi else constants.STORPOOL
+ ),
# Driver capabilities
'clone_across_pools': True,
'sparse_copy_volume': True,
diff --git a/doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst b/doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst
index d2c5895a9..936e83675 100644
--- a/doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst
@@ -19,12 +19,15 @@ Prerequisites
* The controller and all the compute nodes must have access to the StorPool
API service.
-* All nodes where StorPool-backed volumes will be attached must have access to
+* If iSCSI is not being used as a transport (see below), all nodes where
+ StorPool-backed volumes will be attached must have access to
the StorPool data network and run the ``storpool_block`` service.
-* If StorPool-backed Cinder volumes need to be created directly from Glance
- images, then the node running the ``cinder-volume`` service must also have
- access to the StorPool data network and run the ``storpool_block`` service.
+* If Glance uses Cinder as its image store, or if StorPool-backed Cinder
+ volumes need to be created directly from Glance images, and iSCSI is not
+ being used as a transport, then the node running the ``cinder-volume``
+ service must also have access to the StorPool data network and run
+ the ``storpool_block`` service.
* All nodes that need to access the StorPool API (the compute nodes and
the node running the ``cinder-volume`` service) must have the following
@@ -34,6 +37,34 @@ Prerequisites
* the storpool Python bindings package
* the storpool.spopenstack Python helper package
+Using iSCSI as the transport protocol
+-------------------------------------
+
+The StorPool distributed storage system uses its own network protocol,
+highly optimized for its specific needs, for communication between
+the storage servers and the clients (the OpenStack cluster nodes where
+StorPool-backed volumes will be attached). In some deployments, granting
+various nodes access to the StorPool data network, or installing and
+running the ``storpool_block`` client service on them, may pose
+difficulties. The StorPool servers can also expose the user-created
+volumes and snapshots using the standard iSCSI protocol, which only
+requires TCP routing and connectivity between the storage servers and
+the StorPool clients.
+The StorPool Cinder driver may be configured to export volumes and
+snapshots via iSCSI using the ``storpool_iscsi_export_to`` and
+``storpool_iscsi_portal_group`` configuration options.
+
+Additionally, even if the hypervisor nodes running Nova use
+the StorPool network protocol and run the ``storpool_block`` service
+(so the ``storpool_iscsi_export_to`` option keeps its default empty string
+value), the ``storpool_iscsi_cinder_volume`` option configures the
+StorPool Cinder driver so that only the ``cinder-volume`` service uses
+the iSCSI protocol when attaching volumes and snapshots to transfer
+data to and from Glance images.
+
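+As an illustration only (the backend section name and the portal group
+name below are placeholders, not values shipped with the driver), such
+a deployment might use a backend definition along these lines:
+
+.. code-block:: ini
+
+   [storpool-backend]
+   volume_driver = cinder.volume.drivers.storpool.StorPoolDriver
+   # Keep the StorPool native protocol for the hypervisors...
+   storpool_iscsi_export_to =
+   # ...but let the cinder-volume service itself use iSCSI.
+   storpool_iscsi_cinder_volume = True
+   storpool_iscsi_portal_group = openstack_pg
+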
+Multiattach support for StorPool volumes is only enabled when iSCSI is
+used for all exports, i.e. when ``storpool_iscsi_export_to`` is set to
+``*`` and all StorPool volumes are exported via iSCSI to all initiators.
+
Configuring the StorPool volume driver
--------------------------------------
@@ -55,6 +86,35 @@ volume backend definition) and per volume type:
with the default placement constraints for the StorPool cluster.
The default value for the chain replication is 3.
+In addition, if the iSCSI protocol is used to access the StorPool cluster as
+described in the previous section, the following options may be defined in
+the ``cinder.conf`` volume backend definition:
+
+- ``storpool_iscsi_export_to``: if set to the value ``*``, the StorPool
+ Cinder driver will export volumes and snapshots using the iSCSI
+ protocol instead of the StorPool network protocol. The
+ ``storpool_iscsi_portal_group`` option must also be specified.
+
+- ``storpool_iscsi_portal_group``: if the ``storpool_iscsi_export_to``
+ option is set to the value ``*`` or the
+ ``storpool_iscsi_cinder_volume`` option is turned on, this option
+ specifies the name of the iSCSI portal group that Cinder volumes will
+ be exported to.
+
+- ``storpool_iscsi_cinder_volume``: if enabled, the ``cinder-volume``
+  service will use iSCSI to attach volumes and snapshots when
+  transferring data to and from Glance images (if Glance is configured
+  to use the Cinder glance_store), even if the
+  ``storpool_iscsi_export_to`` option has its default empty value.
+
+- ``storpool_iscsi_learn_initiator_iqns``: if enabled, the StorPool
+ Cinder driver will automatically use the StorPool API to create
+ definitions for new initiators in the StorPool cluster's
+  configuration. This is the default behavior of the driver; it may be
+  disabled in the rare case when, e.g. because of site policy, OpenStack
+  iSCSI initiators (e.g. Nova hypervisors) need to be explicitly allowed
+  to use the StorPool iSCSI targets.
+
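+For example (an illustrative sketch; the backend section and portal
+group names are placeholders), a backend that exports all volumes via
+iSCSI might be defined as follows:
+
+.. code-block:: ini
+
+   [storpool-iscsi]
+   volume_driver = cinder.volume.drivers.storpool.StorPoolDriver
+   storpool_iscsi_export_to = *
+   storpool_iscsi_portal_group = openstack_pg
+   # The default; shown here for completeness.
+   storpool_iscsi_learn_initiator_iqns = True
+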
Using the StorPool volume driver
--------------------------------
diff --git a/releasenotes/notes/storpool-iscsi-cefcfe590a07c5c7.yaml b/releasenotes/notes/storpool-iscsi-cefcfe590a07c5c7.yaml
new file mode 100644
index 000000000..3863e4099
--- /dev/null
+++ b/releasenotes/notes/storpool-iscsi-cefcfe590a07c5c7.yaml
@@ -0,0 +1,15 @@
+features:
+ - |
+ StorPool driver: Added support for exporting the StorPool-backed
+ volumes using the iSCSI protocol, so that the Cinder volume service
+ and/or the Nova or Glance consumers do not need to have the StorPool
+ block device third-party service installed. See the StorPool driver
+ section in the Cinder documentation for more information on the
+ ``storpool_iscsi_export_to``, ``storpool_iscsi_portal_group``,
+ ``storpool_iscsi_cinder_volume``, and
+ ``storpool_iscsi_learn_initiator_iqns`` options.
+
+ .. note::
+ Multiattach support for StorPool is now only enabled if
+ ``storpool_iscsi_export_to`` is set to ``*``, that is, when all
+ StorPool volumes will be exported via iSCSI to all initiators.
--
2.43.0