diff --git a/api-ref/source/samples/share-access-rules-update-request.json b/api-ref/source/samples/share-access-rules-update-request.json new file mode 100644 index 0000000000..06e9b2ce6d --- /dev/null +++ b/api-ref/source/samples/share-access-rules-update-request.json @@ -0,0 +1,5 @@ +{ + "update_access": { + "access_level": "ro" + } +} diff --git a/api-ref/source/samples/share-access-rules-update-response.json b/api-ref/source/samples/share-access-rules-update-response.json new file mode 100644 index 0000000000..d66866dc9f --- /dev/null +++ b/api-ref/source/samples/share-access-rules-update-response.json @@ -0,0 +1,17 @@ +{ + "access": { + "access_level": "ro", + "state": "error", + "id": "507bf114-36f2-4f56-8cf4-857985ca87c1", + "share_id": "fb213952-2352-41b4-ad7b-2c4c69d13eef", + "access_type": "ip", + "access_to": "0.0.0.0/0", + "access_key": null, + "created_at": "2024-12-17T02:01:04.000000", + "updated_at": "2024-12-17T02:01:04.000000", + "metadata": { + "key1": "value1", + "key2": "value2" + } + } +} diff --git a/api-ref/source/share-access-rules.inc b/api-ref/source/share-access-rules.inc index c83b1556a2..f2996d8463 100644 --- a/api-ref/source/share-access-rules.inc +++ b/api-ref/source/share-access-rules.inc @@ -131,3 +131,64 @@ Response example .. literalinclude:: samples/share-access-rules-list-response.json :language: javascript + + +Update share access rule +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. rest_method:: PUT /v2/share-access-rules/{access_id} + +.. versionadded:: 2.88 + +Update ``access_level`` of a specified access rule. + +Response codes +-------------- + +.. rest_status_code:: success status.yaml + + - 200 + +.. rest_status_code:: error status.yaml + + - 400 + - 401 + - 403 + - 404 + +Request +------- + +.. rest_parameters:: parameters.yaml + + - project_id: project_id_path + - access_id: access_id_path + - access_level: access_level + +Request example +--------------- + +.. 
literalinclude:: samples/share-access-rules-update-request.json + :language: javascript + +Response parameters +------------------- + +.. rest_parameters:: parameters.yaml + + - share_id: access_share_id + - created_at: created_at + - updated_at: updated_at + - access_type: access_type + - access_to: access_to + - access_key: access_key_share_access_rules + - state: state + - access_level: access_level + - id: access_rule_id + - metadata: access_metadata + +Response example +---------------- + +.. literalinclude:: samples/share-access-rules-update-response.json + :language: javascript diff --git a/doc/source/admin/shared-file-systems-crud-share.rst b/doc/source/admin/shared-file-systems-crud-share.rst index b15cf26172..6adcfecff6 100644 --- a/doc/source/admin/shared-file-systems-crud-share.rst +++ b/doc/source/admin/shared-file-systems-crud-share.rst @@ -501,6 +501,17 @@ You can update the metadata: You also can unset the metadata using **manila metadata unset **. +.. note:: + In case you want to prevent certain metadata key-values to be manipulated by + less privileged users, you can provide a list of such keys through the admin + only metadata configuration option listed in the + :ref:`additional configuration options page `. + + In case you want to pass certain metadata key-values to be consumed by share + drivers, you can provide a list of such keys through the driver updatable + metadata configuration option listed in the + :ref:`additional configuration options page `. + Reset share state ----------------- diff --git a/doc/source/configuration/tables/manila-common.inc b/doc/source/configuration/tables/manila-common.inc index 5b6686d47e..9c18ae2918 100644 --- a/doc/source/configuration/tables/manila-common.inc +++ b/doc/source/configuration/tables/manila-common.inc @@ -110,6 +110,12 @@ - (Boolean) Treat X-Forwarded-For as the canonical remote address. Only enable this if you have a sanitizing proxy. 
* - ``wsgi_keep_alive`` = ``True`` - (Boolean) If False, closes the client socket connection explicitly. Setting it to True to maintain backward compatibility. Recommended setting is set it to False. + * - ``admin_only_metadata`` = ``__affinity_same_host,__affinity_different_host`` + - (List) The affinity keys are included by default to ensure backwards compatibility. Update the list with metadata items that should only be manipulated by people allowed by the "update_admin_only_metadata" policy. + * - ``driver_updatable_metadata`` = ``None`` + - (List) Metadata keys that will decide which share metadata can be passed to share drivers as part of metadata create/update operations. + * - ``driver_updatable_subnet_metadata`` = ``None`` + - (List) Metadata keys that will decide which share network_subnet_metadata can be passed to share drivers as part of metadata create/update operations. + * - **[coordination]** - * - ``backend_url`` = ``file://$state_path`` diff --git a/doc/source/user/share-network-subnet-operations.rst b/doc/source/user/share-network-subnet-operations.rst index f7c3cc7459..b59d9d9acc 100644 --- a/doc/source/user/share-network-subnet-operations.rst +++ b/doc/source/user/share-network-subnet-operations.rst @@ -12,7 +12,10 @@ commands. You can create multiple subnets in a share network, and if you do not specify an availability zone, the subnet you are creating will be considered default by the Shared File Systems service. The default subnet spans all availability zones. You cannot have more than one default subnet -per share network. +per share network. During share server migration, metadata belonging to the +old share network subnet is ignored when moving to a new share network. Since +metadata updates are passed to the backend driver, these updates will no +longer be available on the new share network after migration. .. 
important:: diff --git a/manila/api/openstack/api_version_request.py b/manila/api/openstack/api_version_request.py index 0fbcd5e9f4..89973f03de 100644 --- a/manila/api/openstack/api_version_request.py +++ b/manila/api/openstack/api_version_request.py @@ -203,13 +203,18 @@ * 2.83 - Added 'disabled_reason' field to services. * 2.84 - Added mount_point_name to shares. * 2.85 - Added backup_type field to share backups. + * 2.86 - Add ensure share API. + * 2.87 - Added Share export location metadata API. + * 2.88 - Added support for update Share access rule. + * 2.89 - Added support for passing Share network subnet metadata updates + to driver. """ # The minimum and maximum versions of the API supported # The default api version request is defined to be the # minimum version of the API supported. _MIN_API_VERSION = "2.0" -_MAX_API_VERSION = "2.85" +_MAX_API_VERSION = "2.89" DEFAULT_API_VERSION = _MIN_API_VERSION diff --git a/manila/api/openstack/rest_api_version_history.rst b/manila/api/openstack/rest_api_version_history.rst index d1023ae879..a5a8c682b1 100644 --- a/manila/api/openstack/rest_api_version_history.rst +++ b/manila/api/openstack/rest_api_version_history.rst @@ -458,3 +458,22 @@ user documentation. 2.85 ---- Added ``backup_type`` field to share backup object. + +2.86 +---- + Added ensure shares API. + +2.87 +---- + Added Metadata API methods (GET, PUT, POST, DELETE) + to Share Export Locations. + +2.88 +---- + Allows updating the access rule's access level. + + +2.89 +---- + Added support for passing share network subnet metadata updates to share + backend driver. 
diff --git a/manila/api/v1/shares.py b/manila/api/v1/shares.py index 0394770f66..aceda9841a 100644 --- a/manila/api/v1/shares.py +++ b/manila/api/v1/shares.py @@ -589,12 +589,14 @@ def _check_for_access_rule_locks(self, context, access_data, access_id, unrestrict = access_data.get('unrestrict', False) search_opts = { 'resource_id': access_id, - 'resource_action': constants.RESOURCE_ACTION_DELETE + 'resource_action': constants.RESOURCE_ACTION_DELETE, + 'all_projects': True, } locks, locks_count = ( self.resource_locks_api.get_all( - context, search_opts=search_opts, show_count=True) or [] + context.elevated(), search_opts=search_opts, + show_count=True) or [] ) # no locks placed, nothing to do @@ -620,7 +622,9 @@ def raise_rule_is_locked(share_id, unrestrict=False): try: self.resource_locks_api.ensure_context_can_delete_lock( context, lock['id']) - except exception.NotAuthorized: + except (exception.NotAuthorized, exception.ResourceLockNotFound): + # If it is not found, then it means that the context doesn't + # have access to this resource and should be denied. 
non_deletable_locks.append(lock) if non_deletable_locks: diff --git a/manila/api/v2/metadata.py b/manila/api/v2/metadata.py index e3ab4253cf..9f1520c003 100644 --- a/manila/api/v2/metadata.py +++ b/manila/api/v2/metadata.py @@ -28,36 +28,42 @@ class MetadataController(object): "share": "share_get", "share_snapshot": "share_snapshot_get", "share_network_subnet": "share_network_subnet_get", + "share_export_location": "export_location_get_by_uuid", } resource_metadata_get = { "share": "share_metadata_get", "share_snapshot": "share_snapshot_metadata_get", "share_network_subnet": "share_network_subnet_metadata_get", + "share_export_location": "export_location_metadata_get", } resource_metadata_get_item = { "share": "share_metadata_get_item", "share_snapshot": "share_snapshot_metadata_get_item", "share_network_subnet": "share_network_subnet_metadata_get_item", + "share_export_location": "export_location_metadata_get_item", } resource_metadata_update = { "share": "share_metadata_update", "share_snapshot": "share_snapshot_metadata_update", "share_network_subnet": "share_network_subnet_metadata_update", + "share_export_location": "export_location_metadata_update", } resource_metadata_update_item = { "share": "share_metadata_update_item", "share_snapshot": "share_snapshot_metadata_update_item", "share_network_subnet": "share_network_subnet_metadata_update_item", + "share_export_location": "export_location_metadata_update_item", } resource_metadata_delete = { "share": "share_metadata_delete", "share_snapshot": "share_snapshot_metadata_delete", "share_network_subnet": "share_network_subnet_metadata_delete", + "share_export_location": "export_location_metadata_delete", } resource_policy_get = { @@ -72,10 +78,13 @@ def __init__(self): def _get_resource(self, context, resource_id, for_modification=False, parent_id=None): - if self.resource_name in ['share', 'share_network_subnet']: - # we would allow retrieving some "public" resources - # across project namespaces except share 
snapshots, - # project_only=True is hard coded + if self.resource_name in ['share', 'share_network_subnet', + 'share_export_location']: + # some resources don't have a "project_id" field (like + # share_export_location or share_network_subnet), + # and sometimes we want to retrieve "public" resources + # (like shares), so avoid hard coding project_only=True in the + # lookup where necessary kwargs = {} else: kwargs = {'project_only': True} @@ -86,23 +95,25 @@ def _get_resource(self, context, resource_id, kwargs["parent_id"] = parent_id res = get_res_method(context, resource_id, **kwargs) - get_policy = self.resource_policy_get[self.resource_name] - if res.get('is_public') is False: - authorized = policy.check_policy(context, - self.resource_name, - get_policy, - res, - do_raise=False) - if not authorized: - # Raising NotFound to prevent existence detection - raise exception.NotFound() - elif for_modification: - # a public resource's metadata can be viewed, but not - # modified by non owners - policy.check_policy(context, - self.resource_name, - get_policy, - res) + if self.resource_name not in ["share_export_location"]: + get_policy = self.resource_policy_get[self.resource_name] + # skip policy check for export locations + if res.get('is_public') is False: + authorized = policy.check_policy(context, + self.resource_name, + get_policy, + res, + do_raise=False) + if not authorized: + # Raising NotFound to prevent existence detection + raise exception.NotFound() + elif for_modification: + # a public resource's metadata can be viewed, but not + # modified by non owners + policy.check_policy(context, + self.resource_name, + get_policy, + res) except exception.NotFound: msg = _('%s not found.' 
% self.resource_name.capitalize()) raise exc.HTTPNotFound(explanation=msg) @@ -120,6 +131,7 @@ def _get_metadata(self, context, resource_id, parent_id=None): @wsgi.response(200) def _index_metadata(self, req, resource_id, parent_id=None): + """Lists existing metadata.""" context = req.environ['manila.context'] metadata = self._get_metadata(context, resource_id, parent_id=parent_id) diff --git a/manila/api/v2/router.py b/manila/api/v2/router.py index b4f5a485ce..33e7db481f 100644 --- a/manila/api/v2/router.py +++ b/manila/api/v2/router.py @@ -102,6 +102,13 @@ def _setup_routes(self, mapper): mapper.resource("service", "services", controller=self.resources["services"]) + for path_prefix in ['/{project_id}', '']: + # project_id is optional + mapper.connect("services", + "%s/services/ensure-shares" % path_prefix, + controller=self.resources["services"], + action="ensure_shares", + conditions={"method": ["POST"]}) self.resources["quota_sets_legacy"] = ( quota_sets.create_resource_legacy()) @@ -252,6 +259,45 @@ def _setup_routes(self, mapper): controller=self.resources["share_export_locations"], action="show", conditions={"method": ["GET"]}) + mapper.connect("export_locations_metadata", + "%s/shares/{share_id}/export_locations" + "/{resource_id}/metadata" % path_prefix, + controller=self.resources["share_export_locations"], + action="create_metadata", + conditions={"method": ["POST"]}) + mapper.connect("export_locations_metadata", + "%s/shares/{share_id}/export_locations" + "/{resource_id}/metadata" % path_prefix, + controller=self.resources["share_export_locations"], + action="update_all_metadata", + conditions={"method": ["PUT"]}) + mapper.connect("export_locations_metadata", + "%s/shares/{share_id}/export_locations/" + "{resource_id}/metadata/{key}" + % path_prefix, + controller=self.resources["share_export_locations"], + action="update_metadata_item", + conditions={"method": ["POST"]}) + mapper.connect("export_locations_metadata", + 
"%s/shares/{share_id}/export_locations/" + "{resource_id}/metadata" % path_prefix, + controller=self.resources["share_export_locations"], + action="index_metadata", + conditions={"method": ["GET"]}) + mapper.connect("export_locations_metadata", + "%s/shares/{share_id}/export_locations/" + "{resource_id}/metadata/{key}" + % path_prefix, + controller=self.resources["share_export_locations"], + action="show_metadata", + conditions={"method": ["GET"]}) + mapper.connect("export_locations_metadata", + "%s/shares/{share_id}/export_locations/" + "{resource_id}/metadata/{key}" + % path_prefix, + controller=self.resources["share_export_locations"], + action="delete_metadata", + conditions={"method": ["DELETE"]}) self.resources["snapshots"] = share_snapshots.create_resource() mapper.resource("snapshot", "snapshots", diff --git a/manila/api/v2/services.py b/manila/api/v2/services.py index 8ff40bfb63..226a17321b 100644 --- a/manila/api/v2/services.py +++ b/manila/api/v2/services.py @@ -14,13 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. 
+import http.client as http_client from oslo_utils import strutils import webob.exc from manila.api.openstack import wsgi from manila.api.views import services as services_views from manila import db +from manila import exception from manila.i18n import _ +from manila.services import api as service_api class ServiceMixin(object): @@ -34,7 +37,7 @@ class ServiceMixin(object): _view_builder_class = services_views.ViewBuilder @wsgi.Controller.authorize("index") - def _index(self, req): + def _index(self, req, support_ensure_shares=False): """Return a list of all running services.""" context = req.environ['manila.context'] @@ -42,7 +45,7 @@ def _index(self, req): services = [] for service in all_services: - service = { + service_data = { 'id': service['id'], 'binary': service['binary'], 'host': service['host'], @@ -52,7 +55,9 @@ def _index(self, req): 'state': service['state'], 'updated_at': service['updated_at'], } - services.append(service) + if support_ensure_shares: + service_data['ensuring'] = service['ensuring'] + services.append(service_data) search_opts = [ 'host', @@ -141,10 +146,18 @@ class ServiceController(ServiceMixin, wsgi.Controller): Registered under API URL 'services'. 
""" - @wsgi.Controller.api_version('2.7') + def __init__(self): + super().__init__() + self.service_api = service_api.API() + + @wsgi.Controller.api_version('2.7', '2.85') def index(self, req): return self._index(req) + @wsgi.Controller.api_version('2.86') # noqa + def index(self, req): # pylint: disable=function-redefined # noqa F811 + return self._index(req, support_ensure_shares=True) + @wsgi.Controller.api_version('2.7', '2.82') def update(self, req, id, body): return self._update(req, id, body, support_disabled_reason=False) @@ -153,6 +166,31 @@ def update(self, req, id, body): def update(self, req, id, body): # pylint: disable=function-redefined # noqa F811 return self._update(req, id, body) + @wsgi.Controller.api_version('2.86') + @wsgi.Controller.authorize + def ensure_shares(self, req, body): + """Starts ensure shares for a given manila-share binary.""" + context = req.environ['manila.context'] + + host = body.get('host', None) + if not host: + raise webob.exc.HTTPBadRequest('Missing host parameter.') + + try: + # The only binary supported is Manila share. 
+ service = db.service_get_by_args(context, host, 'manila-share') + except exception.NotFound: + raise webob.exc.HTTPNotFound( + "manila-share binary for '%s' host not found" % host + ) + + try: + self.service_api.ensure_shares(context, service, host) + except webob.exc.HTTPConflict: + raise + + return webob.Response(status_int=http_client.ACCEPTED) + def create_resource_legacy(): return wsgi.Resource(ServiceControllerLegacy()) diff --git a/manila/api/v2/share_accesses.py b/manila/api/v2/share_accesses.py index e139cfac33..78148a52a3 100644 --- a/manila/api/v2/share_accesses.py +++ b/manila/api/v2/share_accesses.py @@ -55,10 +55,11 @@ def _is_rule_restricted(self, context, id): search_opts = { 'resource_id': id, 'resource_action': constants.RESOURCE_ACTION_SHOW, - 'resource_type': 'access_rule' + 'resource_type': 'access_rule', + 'all_projects': True, } locks, count = self.resource_locks_api.get_all( - context, search_opts, show_count=True) + context.elevated(), search_opts, show_count=True) if count: return self.resource_locks_api.access_is_restricted(context, @@ -143,6 +144,38 @@ def index(self, req): def index(self, req): # pylint: disable=function-redefined # noqa F811 return self._index(req, support_for_access_filters=True) + @wsgi.Controller.api_version('2.88') + @wsgi.Controller.authorize('update') + def update(self, req, id, body): + """Update access_level about the given share access rule.""" + context = req.environ['manila.context'] + if not self.is_valid_body(body, 'update_access'): + raise webob.exc.HTTPBadRequest() + + access_data = body['update_access'] + access_level = access_data.get('access_level', None) + if not access_level: + msg = _("Invalid input. 
Missing 'access_level' in " + "update request.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if access_level not in constants.ACCESS_LEVELS: + msg = _("Invalid or unsupported share access " + "level: %s.") % access_level + raise webob.exc.HTTPBadRequest(explanation=msg) + + share_access = self._get_share_access(context, id) + if access_level == share_access.access_level: + return self._view_builder.view(req, share_access) + + share = self.share_api.get(context, share_access.share_id) + values = { + 'access_level': access_level, + } + access = self.share_api.update_access( + context, share, share_access, values) + return self._view_builder.view(req, access) + def create_resource(): return wsgi.Resource(ShareAccessesController()) diff --git a/manila/api/v2/share_export_locations.py b/manila/api/v2/share_export_locations.py index 1da599749d..4e5fe33438 100644 --- a/manila/api/v2/share_export_locations.py +++ b/manila/api/v2/share_export_locations.py @@ -13,23 +13,33 @@ # License for the specific language governing permissions and limitations # under the License. 
+from oslo_config import cfg +from oslo_log import log from webob import exc from manila.api.openstack import wsgi +from manila.api.v2 import metadata from manila.api.views import export_locations as export_locations_views from manila.db import api as db_api from manila import exception from manila.i18n import _ from manila import policy +LOG = log.getLogger(__name__) +CONF = cfg.CONF -class ShareExportLocationController(wsgi.Controller): + +class ShareExportLocationController(wsgi.Controller, + metadata.MetadataController): """The Share Export Locations API controller.""" def __init__(self): self._view_builder_class = export_locations_views.ViewBuilder self.resource_name = 'share_export_location' super(ShareExportLocationController, self).__init__() + self._conf_admin_only_metadata_keys = getattr( + CONF, 'admin_only_el_metadata', [] + ) def _verify_share(self, context, share_id): try: @@ -94,6 +104,96 @@ def show(self, req, share_id, # pylint: disable=function-redefined # noqa F811 return self._show(req, share_id, export_location_uuid, ignore_secondary_replicas=True) + def _validate_metadata_for_update(self, req, share_export_location, + metadata, delete=True): + persistent_keys = set(self._conf_admin_only_metadata_keys) + context = req.environ['manila.context'] + if set(metadata).intersection(persistent_keys): + try: + policy.check_policy( + context, 'share_export_location', + 'update_admin_only_metadata') + except exception.PolicyNotAuthorized: + msg = _("Cannot set or update admin only metadata.") + LOG.exception(msg) + raise exc.HTTPForbidden(explanation=msg) + persistent_keys = [] + + current_export_metadata = db_api.export_location_metadata_get( + context, share_export_location) + if delete: + _metadata = metadata + for key in persistent_keys: + if key in current_export_metadata: + _metadata[key] = current_export_metadata[key] + else: + metadata_copy = metadata.copy() + for key in persistent_keys: + metadata_copy.pop(key, None) + _metadata = 
current_export_metadata.copy() + _metadata.update(metadata_copy) + + return _metadata + + @wsgi.Controller.api_version("2.87") + @wsgi.Controller.authorize("get_metadata") + def index_metadata(self, req, share_id, resource_id): + """Returns the list of metadata for a given share export location.""" + context = req.environ['manila.context'] + self._verify_share(context, share_id) + return self._index_metadata(req, resource_id) + + @wsgi.Controller.api_version("2.87") + @wsgi.Controller.authorize("update_metadata") + def create_metadata(self, req, share_id, resource_id, body): + """Create metadata for a given share export location.""" + _metadata = self._validate_metadata_for_update(req, resource_id, + body['metadata'], + delete=False) + body['metadata'] = _metadata + context = req.environ['manila.context'] + self._verify_share(context, share_id) + return self._create_metadata(req, resource_id, body) + + @wsgi.Controller.api_version("2.87") + @wsgi.Controller.authorize("update_metadata") + def update_all_metadata(self, req, share_id, resource_id, body): + """Update entire metadata for a given share export location.""" + _metadata = self._validate_metadata_for_update(req, resource_id, + body['metadata']) + body['metadata'] = _metadata + context = req.environ['manila.context'] + self._verify_share(context, share_id) + return self._update_all_metadata(req, resource_id, body) + + @wsgi.Controller.api_version("2.87") + @wsgi.Controller.authorize("update_metadata") + def update_metadata_item(self, req, share_id, resource_id, body, key): + """Update metadata item for a given share export location.""" + _metadata = self._validate_metadata_for_update(req, resource_id, + body['metadata'], + delete=False) + body['metadata'] = _metadata + context = req.environ['manila.context'] + self._verify_share(context, share_id) + return self._update_metadata_item(req, resource_id, body, key) + + @wsgi.Controller.api_version("2.87") + @wsgi.Controller.authorize("get_metadata") + def 
show_metadata(self, req, share_id, resource_id, key): + """Show metadata for a given share export location.""" + context = req.environ['manila.context'] + self._verify_share(context, share_id) + return self._show_metadata(req, resource_id, key) + + @wsgi.Controller.api_version("2.87") + @wsgi.Controller.authorize("delete_metadata") + def delete_metadata(self, req, share_id, resource_id, key): + """Delete metadata for a given share export location.""" + context = req.environ['manila.context'] + self._verify_share(context, share_id) + return self._delete_metadata(req, resource_id, key) + def create_resource(): return wsgi.Resource(ShareExportLocationController()) diff --git a/manila/api/v2/share_network_subnets.py b/manila/api/v2/share_network_subnets.py index d41bbeccf9..936397c68a 100644 --- a/manila/api/v2/share_network_subnets.py +++ b/manila/api/v2/share_network_subnets.py @@ -15,7 +15,7 @@ from http import client as http_client -from manila.api import common +from oslo_config import cfg from oslo_db import exception as db_exception from oslo_log import log import webob @@ -29,10 +29,13 @@ from manila.db import api as db_api from manila import exception from manila.i18n import _ +from manila.message import api as message_api +from manila.message import message_field from manila import share from manila.share import rpcapi as share_rpcapi LOG = log.getLogger(__name__) +CONF = cfg.CONF class ShareNetworkSubnetController(wsgi.Controller, @@ -46,6 +49,7 @@ def __init__(self): super(ShareNetworkSubnetController, self).__init__() self.share_rpcapi = share_rpcapi.ShareAPI() self.share_api = share.API() + self.message_api = message_api.API() @wsgi.Controller.api_version("2.51") @wsgi.Controller.authorize @@ -128,7 +132,7 @@ def create(self, req, share_network_id, body): data['share_network_id'] = share_network_id multiple_subnet_support = (req.api_version_request >= api_version.APIVersionRequest("2.70")) - share_network, existing_subnets = common.validate_subnet_create( 
+ share_network, existing_subnets = api_common.validate_subnet_create( context, share_network_id, data, multiple_subnet_support) # create subnet operation on subnets with share servers means that an @@ -160,6 +164,15 @@ def create(self, req, share_network_id, body): try: share_network_subnet = db_api.share_network_subnet_create( context, data) + metadata_support = (req.api_version_request >= + api_version.APIVersionRequest("2.89")) + if metadata_support and data.get('metadata'): + context = req.environ['manila.context'] + self.share_api.update_share_network_subnet_from_metadata( + context, + share_network_id, + share_network_subnet['id'], + data.get('metadata')) except db_exception.DBError as e: msg = _('Could not create the share network subnet.') LOG.error(e) @@ -201,23 +214,47 @@ def index_metadata(self, req, share_network_id, resource_id): @wsgi.Controller.authorize("update_metadata") def create_metadata(self, req, share_network_id, resource_id, body): """Create metadata for a given share network subnet.""" - return self._create_metadata(req, resource_id, body, - parent_id=share_network_id) + metadata = self._create_metadata(req, resource_id, body, + parent_id=share_network_id) + if req.api_version_request >= api_version.APIVersionRequest("2.89"): + context = req.environ['manila.context'] + self.share_api.update_share_network_subnet_from_metadata( + context, + share_network_id, + resource_id, + metadata.get('metadata')) + return metadata @wsgi.Controller.api_version("2.78") @wsgi.Controller.authorize("update_metadata") def update_all_metadata(self, req, share_network_id, resource_id, body): """Update entire metadata for a given share network subnet.""" - return self._update_all_metadata(req, resource_id, body, - parent_id=share_network_id) + metadata = self._update_all_metadata(req, resource_id, body, + parent_id=share_network_id) + if req.api_version_request >= api_version.APIVersionRequest("2.89"): + context = req.environ['manila.context'] + 
self.share_api.update_share_network_subnet_from_metadata( + context, + share_network_id, + resource_id, + metadata.get('metadata')) + return metadata @wsgi.Controller.api_version("2.78") @wsgi.Controller.authorize("update_metadata") def update_metadata_item(self, req, share_network_id, resource_id, body, key): """Update metadata item for a given share network subnet.""" - return self._update_metadata_item(req, resource_id, body, key, - parent_id=share_network_id) + metadata = self._update_metadata_item(req, resource_id, body, key, + parent_id=share_network_id) + if req.api_version_request >= api_version.APIVersionRequest("2.89"): + context = req.environ['manila.context'] + self.share_api.update_share_network_subnet_from_metadata( + context, + share_network_id, + resource_id, + metadata.get('metadata')) + return metadata @wsgi.Controller.api_version("2.78") @wsgi.Controller.authorize("get_metadata") @@ -230,6 +267,20 @@ def show_metadata(self, req, share_network_id, resource_id, key): @wsgi.Controller.authorize("delete_metadata") def delete_metadata(self, req, share_network_id, resource_id, key): """Delete metadata for a given share network subnet.""" + if req.api_version_request >= api_version.APIVersionRequest("2.89"): + driver_keys = getattr( + CONF, 'driver_updatable_subnet_metadata', []) + if key in driver_keys: + context = req.environ['manila.context'] + share_network = db_api.share_network_get( + context, share_network_id) + self.message_api.create( + context, + message_field.Action.UPDATE_METADATA, + share_network['project_id'], + resource_type=message_field.Resource.SHARE_NETWORK_SUBNET, + resource_id=resource_id, + detail=message_field.Detail.UPDATE_METADATA_NOT_DELETED) return self._delete_metadata(req, resource_id, key, parent_id=share_network_id) diff --git a/manila/api/v2/shares.py b/manila/api/v2/shares.py index 4d4b9ca0bd..f79bf0d599 100644 --- a/manila/api/v2/shares.py +++ b/manila/api/v2/shares.py @@ -627,9 +627,9 @@ def detail(self, req): def 
_validate_metadata_for_update(self, req, share_id, metadata, delete=True): - admin_metadata_ignore_keys = set(self._conf_admin_only_metadata_keys) + persistent_keys = set(self._conf_admin_only_metadata_keys) context = req.environ['manila.context'] - if set(metadata).intersection(admin_metadata_ignore_keys): + if set(metadata).intersection(persistent_keys): try: policy.check_policy( context, 'share', 'update_admin_only_metadata') @@ -637,17 +637,17 @@ def _validate_metadata_for_update(self, req, share_id, metadata, msg = _("Cannot set or update admin only metadata.") LOG.exception(msg) raise exc.HTTPForbidden(explanation=msg) - admin_metadata_ignore_keys = [] + persistent_keys = [] current_share_metadata = db.share_metadata_get(context, share_id) if delete: _metadata = metadata - for key in admin_metadata_ignore_keys: + for key in persistent_keys: if key in current_share_metadata: _metadata[key] = current_share_metadata[key] else: metadata_copy = metadata.copy() - for key in admin_metadata_ignore_keys: + for key in persistent_keys: metadata_copy.pop(key, None) _metadata = current_share_metadata.copy() _metadata.update(metadata_copy) @@ -671,7 +671,12 @@ def create_metadata(self, req, resource_id, body): body['metadata'], delete=False) body['metadata'] = _metadata - return self._create_metadata(req, resource_id, body) + metadata = self._create_metadata(req, resource_id, body) + + context = req.environ['manila.context'] + self.share_api.update_share_from_metadata(context, resource_id, + metadata.get('metadata')) + return metadata @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("update_share_metadata") @@ -682,7 +687,12 @@ def update_all_metadata(self, req, resource_id, body): _metadata = self._validate_metadata_for_update(req, resource_id, body['metadata']) body['metadata'] = _metadata - return self._update_all_metadata(req, resource_id, body) + metadata = self._update_all_metadata(req, resource_id, body) + + context = req.environ['manila.context'] + 
self.share_api.update_share_from_metadata(context, resource_id, + metadata.get('metadata')) + return metadata @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("update_share_metadata") @@ -694,7 +704,12 @@ def update_metadata_item(self, req, resource_id, body, key): body['metadata'], delete=False) body['metadata'] = _metadata - return self._update_metadata_item(req, resource_id, body, key) + metadata = self._update_metadata_item(req, resource_id, body, key) + + context = req.environ['manila.context'] + self.share_api.update_share_from_metadata(context, resource_id, + metadata.get('metadata')) + return metadata @wsgi.Controller.api_version("2.0") @wsgi.Controller.authorize("get_share_metadata") diff --git a/manila/api/views/export_locations.py b/manila/api/views/export_locations.py index ea71d031c5..8f2e787b9a 100644 --- a/manila/api/views/export_locations.py +++ b/manila/api/views/export_locations.py @@ -13,6 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. 
+import copy + from oslo_utils import strutils from manila.api import common @@ -25,6 +27,7 @@ class ViewBuilder(common.ViewBuilder): _detail_version_modifiers = [ 'add_preferred_path_attribute', + 'add_metadata_attribute', ] def _get_export_location_view(self, request, export_location, @@ -87,3 +90,11 @@ def add_preferred_path_attribute(self, context, view_dict, export_location): view_dict['preferred'] = strutils.bool_from_string( export_location['el_metadata'].get('preferred')) + + @common.ViewBuilder.versioned_method('2.87') + def add_metadata_attribute(self, context, view_dict, + export_location): + metadata = export_location.get('el_metadata') + meta_copy = copy.copy(metadata) + meta_copy.pop('preferred', None) + view_dict['metadata'] = meta_copy diff --git a/manila/api/views/services.py b/manila/api/views/services.py index d0db7df3c9..3769c83101 100644 --- a/manila/api/views/services.py +++ b/manila/api/views/services.py @@ -21,6 +21,7 @@ class ViewBuilder(common.ViewBuilder): _collection_name = "services" _detail_version_modifiers = [ "add_disabled_reason_field", + "add_ensuring_field", ] def summary(self, request, service): @@ -49,3 +50,7 @@ def add_disabled_reason_field(self, context, service_dict, service): service_dict.pop('disabled', None) service_dict['status'] = service.get('status') service_dict['disabled_reason'] = service.get('disabled_reason') + + @common.ViewBuilder.versioned_method("2.86") + def add_ensuring_field(self, context, service_dict, service): + service_dict['ensuring'] = service.get('ensuring') diff --git a/manila/common/config.py b/manila/common/config.py index a52bfd07da..5f242b3ad7 100644 --- a/manila/common/config.py +++ b/manila/common/config.py @@ -141,6 +141,28 @@ default=constants.AdminOnlyMetadata.SCHEDULER_FILTERS, help='Metadata keys that should only be manipulated by ' 'administrators.'), + cfg.ListOpt('driver_updatable_metadata', + default=[], + help='Metadata keys that will decide which share metadata ' + '(element of the 
list is , ' + 'i.e max_files) can be passed to share drivers as part ' + 'of metadata create/update operations.'), + cfg.ListOpt('driver_updatable_subnet_metadata', + default=[], + help='Metadata keys that will decide which share network ' + 'subnet metadata (element of the list is ' + ', e.g. pnfs) can be passed to ' + 'share drivers as part of metadata create/update ' + 'operations.'), + cfg.BoolOpt('update_shares_status_on_ensure', + default=True, + help='Whether Manila should update the status of all shares ' + 'within a backend during ongoing ensure_shares ' + 'run.'), + cfg.ListOpt('admin_only_el_metadata', + default=constants.AdminOnlyMetadata.EXPORT_LOCATION_KEYS, + help='Metadata keys for export locations that should only be ' + 'manipulated by administrators.'), ] CONF.register_opts(global_opts) diff --git a/manila/common/constants.py b/manila/common/constants.py index 0907c7b24d..59c0aba6be 100644 --- a/manila/common/constants.py +++ b/manila/common/constants.py @@ -53,6 +53,7 @@ STATUS_BACKUP_CREATING = 'backup_creating' STATUS_BACKUP_RESTORING = 'backup_restoring' STATUS_BACKUP_RESTORING_ERROR = 'backup_restoring_error' +STATUS_ENSURING = 'ensuring' # Transfer resource type SHARE_RESOURCE_TYPE = 'share' @@ -61,8 +62,10 @@ # Access rule states ACCESS_STATE_QUEUED_TO_APPLY = 'queued_to_apply' ACCESS_STATE_QUEUED_TO_DENY = 'queued_to_deny' +ACCESS_STATE_QUEUED_TO_UPDATE = 'queued_to_update' ACCESS_STATE_APPLYING = 'applying' ACCESS_STATE_DENYING = 'denying' +ACCESS_STATE_UPDATING = 'updating' ACCESS_STATE_ACTIVE = 'active' ACCESS_STATE_ERROR = 'error' ACCESS_STATE_DELETED = 'deleted' @@ -91,8 +94,10 @@ ACCESS_RULES_STATES = ( ACCESS_STATE_QUEUED_TO_APPLY, ACCESS_STATE_QUEUED_TO_DENY, + ACCESS_STATE_QUEUED_TO_UPDATE, ACCESS_STATE_APPLYING, ACCESS_STATE_DENYING, + ACCESS_STATE_UPDATING, ACCESS_STATE_ACTIVE, ACCESS_STATE_ERROR, ACCESS_STATE_DELETED, @@ -144,6 +149,7 @@ STATUS_RESTORING, STATUS_REVERTING, STATUS_SERVER_MIGRATING, STATUS_SERVER_MIGRATING_TO, 
STATUS_BACKUP_RESTORING, STATUS_BACKUP_CREATING, + STATUS_ENSURING, ) INVALID_SHARE_INSTANCE_STATUSES_FOR_ACCESS_RULE_UPDATES = ( @@ -358,8 +364,13 @@ class ExtraSpecs(object): class AdminOnlyMetadata(object): AFFINITY_KEY = "__affinity_same_host" ANTI_AFFINITY_KEY = "__affinity_different_host" + PREFERRED_KEY = "preferred" SCHEDULER_FILTERS = [ AFFINITY_KEY, ANTI_AFFINITY_KEY, ] + + EXPORT_LOCATION_KEYS = [ + PREFERRED_KEY, + ] diff --git a/manila/db/api.py b/manila/db/api.py index 2fa080f7c7..ce0a4ea729 100644 --- a/manila/db/api.py +++ b/manila/db/api.py @@ -564,6 +564,11 @@ def share_access_create(context, values): return IMPL.share_access_create(context, values) +def share_access_update(context, access_id, values): + """Update access to share.""" + return IMPL.share_access_update(context, access_id, values) + + def share_access_get(context, access_id): """Get share access rule.""" return IMPL.share_access_get(context, access_id) @@ -1003,6 +1008,13 @@ def export_location_metadata_get(context, export_location_uuid): return IMPL.export_location_metadata_get(context, export_location_uuid) +def export_location_metadata_get_item(context, export_location_uuid, key): + """Get metadata item for a share export location.""" + return IMPL.export_location_metadata_get_item(context, + export_location_uuid, + key) + + def export_location_metadata_delete(context, export_location_uuid, keys): """Delete metadata of an export location.""" return IMPL.export_location_metadata_delete( @@ -1015,6 +1027,14 @@ def export_location_metadata_update(context, export_location_uuid, metadata, return IMPL.export_location_metadata_update( context, export_location_uuid, metadata, delete) + +def export_location_metadata_update_item(context, export_location_uuid, + metadata): + """Update metadata item if it exists, otherwise create it.""" + return IMPL.export_location_metadata_update_item(context, + export_location_uuid, + metadata) + #################### @@ -1276,12 +1296,12 @@ def 
share_server_get_all_by_host_and_share_subnet_valid( ) -def share_server_get_all_by_host_and_share_subnet( - context, host, share_subnet_id, +def share_server_get_all_by_host_and_or_share_subnet( + context, host=None, share_subnet_id=None, ): - """Get share server DB records by host and share net.""" - return IMPL.share_server_get_all_by_host_and_share_subnet( - context, host, share_subnet_id, + """Get share server DB records by host and/or share net.""" + return IMPL.share_server_get_all_by_host_and_or_share_subnet( + context, host=host, share_subnet_id=share_subnet_id, ) diff --git a/manila/db/migrations/alembic/versions/cdefa6287df8_add_ensuring_field_to_services.py b/manila/db/migrations/alembic/versions/cdefa6287df8_add_ensuring_field_to_services.py new file mode 100644 index 0000000000..3ac69aa53a --- /dev/null +++ b/manila/db/migrations/alembic/versions/cdefa6287df8_add_ensuring_field_to_services.py @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""add-ensuring-field-to-services + +Revision ID: cdefa6287df8 +Revises: 2f27d904214c +Create Date: 2024-07-15 14:29:16.733696 + +""" + +# revision identifiers, used by Alembic. 
+revision = 'cdefa6287df8' +down_revision = '2f27d904214c' + +from alembic import op +from oslo_log import log +import sqlalchemy as sa + + +LOG = log.getLogger(__name__) + + +def upgrade(): + try: + op.add_column('services', sa.Column( + 'ensuring', sa.Boolean, + nullable=False, server_default=sa.sql.false())) + except Exception: + LOG.error("Column services.ensuring not created!") + raise + + +def downgrade(): + try: + op.drop_column('services', 'ensuring') + except Exception: + LOG.error("Column shares.ensuring not dropped!") + raise diff --git a/manila/db/sqlalchemy/api.py b/manila/db/sqlalchemy/api.py index 04b552763f..c9822bb8d1 100644 --- a/manila/db/sqlalchemy/api.py +++ b/manila/db/sqlalchemy/api.py @@ -2964,6 +2964,15 @@ def share_access_create(context, values): return _share_access_get(context, access_ref['id']) +@require_context +@context_manager.writer +def share_access_update(context, access_id, values): + access_ref = _share_access_get(context, access_id) + access_ref.update(values) + access_ref.save(session=context.session) + return access_ref + + @require_context @context_manager.writer def share_instance_access_create(context, values, share_instance_id): @@ -4696,6 +4705,33 @@ def _export_location_metadata_update( return metadata +@require_context +@context_manager.reader +def export_location_metadata_get_item(context, export_location_uuid, key): + + row = _export_location_metadata_get_item( + context, export_location_uuid, key) + result = {row['key']: row['value']} + + return result + + +@require_context +@context_manager.writer +def export_location_metadata_update_item(context, export_location_uuid, + item): + return _export_location_metadata_update(context, export_location_uuid, + item, delete=False) + + +def _export_location_metadata_get_item(context, export_location_uuid, key): + result = _export_location_metadata_get_query( + context, export_location_uuid, + ).filter_by(key=key).first() + if not result: + raise 
exception.MetadataItemNotFound() + return result + ################################### @@ -5463,14 +5499,13 @@ def share_server_get_all_by_host_and_share_subnet_valid( @require_context @context_manager.reader -def share_server_get_all_by_host_and_share_subnet( - context, host, share_subnet_id, +def share_server_get_all_by_host_and_or_share_subnet( + context, host=None, share_subnet_id=None, ): - result = _share_server_get_query( - context, - ).filter_by( - host=host, - ).filter( + result = _share_server_get_query(context) + if host: + result = result.filter_by(host=host) + result = result.filter( models.ShareServer.share_network_subnets.any(id=share_subnet_id) ).all() diff --git a/manila/db/sqlalchemy/models.py b/manila/db/sqlalchemy/models.py index d418368138..b85c018fd5 100644 --- a/manila/db/sqlalchemy/models.py +++ b/manila/db/sqlalchemy/models.py @@ -72,6 +72,7 @@ class Service(BASE, ManilaBase): availability_zone_id = Column(String(36), ForeignKey('availability_zones.id'), nullable=True) + ensuring = Column(Boolean, default=False) availability_zone = orm.relationship( "AvailabilityZone", @@ -1598,7 +1599,9 @@ def get_aggregated_access_rules_state(instance_mappings): order = (constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_DENYING, constants.ACCESS_STATE_QUEUED_TO_DENY, + constants.ACCESS_STATE_QUEUED_TO_UPDATE, constants.ACCESS_STATE_QUEUED_TO_APPLY, + constants.ACCESS_STATE_UPDATING, constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_ACTIVE) diff --git a/manila/message/message_field.py b/manila/message/message_field.py index 5faf7c168b..44ee231e65 100644 --- a/manila/message/message_field.py +++ b/manila/message/message_field.py @@ -21,6 +21,7 @@ class Resource(object): SHARE_REPLICA = 'SHARE_REPLICA' SHARE_SNAPSHOT = 'SHARE_SNAPSHOT' SECURITY_SERVICE = 'SECURITY_SERVICE' + SHARE_NETWORK_SUBNET = 'SHARE_NETWORK_SUBNET' class Action(object): @@ -37,6 +38,7 @@ class Action(object): UPDATE_ACCESS_RULES = ('010', _('update access rules')) 
ADD_UPDATE_SECURITY_SERVICE = ('011', _('add or update security service')) TRANSFER_ACCEPT = ('026', _('transfer accept')) + UPDATE_METADATA = ('027', _('update_metadata')) ALL = ( ALLOCATE_HOST, CREATE, @@ -50,6 +52,7 @@ class Action(object): UPDATE_ACCESS_RULES, ADD_UPDATE_SECURITY_SERVICE, TRANSFER_ACCEPT, + UPDATE_METADATA, ) @@ -154,6 +157,18 @@ class Detail(object): "request. Share back end services are not " "ready yet. Contact your administrator in case " "retrying does not help.")) + UPDATE_METADATA_SUCCESS = ( + '029', + _("Metadata passed to share driver successfully performed required " + "operation.")) + UPDATE_METADATA_FAILURE = ( + '030', + _("Metadata passed to share driver failed to perform required " + "operation.")) + UPDATE_METADATA_NOT_DELETED = ( + '031', + _("Metadata delete operation includes driver updatable metadata, and " + "it is not passed to share driver to perform required operation.")) ALL = ( UNKNOWN_ERROR, @@ -184,6 +199,9 @@ class Detail(object): DRIVER_FAILED_TRANSFER_ACCEPT, SHARE_NETWORK_PORT_QUOTA_LIMIT_EXCEEDED, SHARE_BACKEND_NOT_READY_YET, + UPDATE_METADATA_SUCCESS, + UPDATE_METADATA_FAILURE, + UPDATE_METADATA_NOT_DELETED ) # Exception and detail mappings diff --git a/manila/policies/service.py b/manila/policies/service.py index f22772b13f..6946eb1ce2 100644 --- a/manila/policies/service.py +++ b/manila/policies/service.py @@ -34,6 +34,12 @@ deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) +deprecated_service_ensure = policy.DeprecatedRule( + name=BASE_POLICY_NAME % 'ensure_shares', + check_str=base.RULE_ADMIN_API, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='2024.2/Dalmatian' +) service_policies = [ @@ -79,6 +85,19 @@ ], deprecated_rule=deprecated_service_update ), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'ensure_shares', + check_str=base.ADMIN, + scope_types=['project'], + description="Run ensure shares for a manila-share binary.", + operations=[ + { + 
'method': 'POST', + 'path': '/services/ensure', + } + ], + deprecated_rule=deprecated_service_ensure + ), ] diff --git a/manila/policies/share_access.py b/manila/policies/share_access.py index 2976f5cbd8..59f463a777 100644 --- a/manila/policies/share_access.py +++ b/manila/policies/share_access.py @@ -37,6 +37,12 @@ deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) +deprecated_access_rule_update = policy.DeprecatedRule( + name=BASE_POLICY_NAME % 'update', + check_str=base.RULE_DEFAULT, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='2025.1/Epoxy' +) share_access_rule_policies = [ @@ -67,6 +73,19 @@ ], deprecated_rule=deprecated_access_rule_index ), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'update', + check_str=base.ADMIN_OR_PROJECT_MEMBER, + scope_types=['project'], + description="Update access rules of a given share.", + operations=[ + { + 'method': 'PUT', + 'path': '/share-access-rules/{share_access_id}' + } + ], + deprecated_rule=deprecated_access_rule_update + ), ] diff --git a/manila/policies/share_export_location.py b/manila/policies/share_export_location.py index 498fa3097b..cc4f4fd730 100644 --- a/manila/policies/share_export_location.py +++ b/manila/policies/share_export_location.py @@ -34,6 +34,30 @@ deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) +deprecated_update_export_location_metadata = policy.DeprecatedRule( + name=BASE_POLICY_NAME % 'update_metadata', + check_str=base.RULE_DEFAULT, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='2024.2/Dalmatian' +) +deprecated_delete_export_location_metadata = policy.DeprecatedRule( + name=BASE_POLICY_NAME % 'delete_metadata', + check_str=base.RULE_DEFAULT, + deprecated_reason=DEPRECATED_REASON, + deprecated_since='2024.2/Dalmatian' +) +deprecated_get_export_location_metadata = policy.DeprecatedRule( + name=BASE_POLICY_NAME % 'get_metadata', + check_str=base.RULE_DEFAULT, + 
deprecated_reason=DEPRECATED_REASON, + deprecated_since='2024.2/Dalmatian' +) +deprecated_update_admin_only_metadata = policy.DeprecatedRule( + name=BASE_POLICY_NAME % 'update_admin_only_metadata', + check_str=base.RULE_ADMIN_API, + deprecated_reason=DEPRECATED_REASON, + deprecated_since="2024.2/Dalmatian" +) share_export_location_policies = [ @@ -64,6 +88,79 @@ ], deprecated_rule=deprecated_export_location_show ), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'update_metadata', + check_str=base.ADMIN_OR_PROJECT_MEMBER, + scope_types=['project'], + description="Update share export location metadata.", + operations=[ + { + 'method': 'PUT', + 'path': ('/shares/{share_id}/export_locations/' + '{export_location_id}/metadata'), + }, + { + 'method': 'POST', + 'path': ('/shares/{share_id}/export_locations/' + '{export_location_id}/metadata/{key}') + }, + { + 'method': 'POST', + 'path': ('/shares/{share_id}/export_locations/' + '{export_location_id}/metadata'), + }, + ], + deprecated_rule=deprecated_update_export_location_metadata + ), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'delete_metadata', + check_str=base.ADMIN_OR_PROJECT_MEMBER, + scope_types=['project'], + description="Delete share export location metadata", + operations=[ + { + 'method': 'DELETE', + 'path': ('/shares/{share_id}/export_locations/' + '{export_location_id}/metadata/{key}') + }, + ], + deprecated_rule=deprecated_delete_export_location_metadata + ), + policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'get_metadata', + check_str=base.ADMIN_OR_PROJECT_READER, + scope_types=['project'], + description='Get share export location metadata', + operations=[ + { + 'method': "GET", + 'path': ('/shares/{share_id}/export_locations/' + '{export_location_id}/metadata') + }, + { + 'method': 'GET', + 'path': ('/shares/{share_id}/export_locations/' + '{export_location_id}/metadata/{key}') + }, + ], + deprecated_rule=deprecated_get_export_location_metadata + ), + 
policy.DocumentedRuleDefault( + name=BASE_POLICY_NAME % 'update_admin_only_metadata', + check_str=base.ADMIN, + scope_types=['project'], + description=( + "Update metadata items that are considered \"admin only\" " + "by the service."), + operations=[ + { + 'method': 'PUT', + 'path': '/shares/{share_id}/export_locations/' + '{export_location_id}/metadata', + } + ], + deprecated_rule=deprecated_update_admin_only_metadata + ), ] diff --git a/manila/services/__init__.py b/manila/services/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/manila/services/api.py b/manila/services/api.py new file mode 100644 index 0000000000..77f0e9b193 --- /dev/null +++ b/manila/services/api.py @@ -0,0 +1,38 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_config import cfg + +from webob import exc + +from manila.db import base +from manila.share import rpcapi as share_rpcapi + +CONF = cfg.CONF + + +class API(base.Base): + """API for handling service actions.""" + + def __init__(self): + super(API, self).__init__() + self.share_rpcapi = share_rpcapi.ShareAPI() + + def ensure_shares(self, context, service, host): + """Start the ensure shares in a given host.""" + + if service['state'] != "up": + raise exc.HTTPConflict( + "The service must have its state set to 'up' prior to running " + "ensure shares.") + + self.share_rpcapi.ensure_driver_resources(context, host) diff --git a/manila/share/access.py b/manila/share/access.py index 22bbd74006..da8e3958a4 100644 --- a/manila/share/access.py +++ b/manila/share/access.py @@ -277,7 +277,8 @@ def update_access_rules(self, context, share_instance_id, # Is there a sync in progress? If yes, ignore the incoming request. rule_filter = { 'state': (constants.ACCESS_STATE_APPLYING, - constants.ACCESS_STATE_DENYING), + constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_UPDATING), } syncing_rules = self.get_and_update_share_instance_access_rules( context, filters=rule_filter, share_instance_id=share_instance_id) @@ -288,11 +289,11 @@ def update_access_rules(self, context, share_instance_id, "be applied shortly.") LOG.debug(msg, msg_payload) else: - rules_to_apply_or_deny = ( + rules_to_apply_or_update_or_deny = ( self._update_and_get_unsynced_access_rules_from_db( context, share_instance_id) ) - if rules_to_apply_or_deny: + if rules_to_apply_or_update_or_deny: msg = ("Updating access rules for share instance %(si)s " "belonging to share %(shr)s.") LOG.debug(msg, msg_payload) @@ -320,30 +321,33 @@ def _update_access_rules(self, context, share_instance_id, rules_to_be_removed_from_db = [] # Populate rules to send to the driver - (access_rules_to_be_on_share, add_rules, delete_rules) = ( + (access_rules_on_share, add_rules, delete_rules, update_rules) = ( 
self._get_rules_to_send_to_driver(context, share_instance) ) if share_instance['cast_rules_to_readonly']: # Ensure read/only semantics for a migrating instances - access_rules_to_be_on_share = self._set_rules_to_readonly( - access_rules_to_be_on_share, share_instance) + access_rules_on_share = self._set_rules_to_readonly( + access_rules_on_share, share_instance) add_rules = [] rules_to_be_removed_from_db = delete_rules delete_rules = [] + update_rules = [] try: driver_rule_updates = self._update_rules_through_share_driver( - context, share_instance, access_rules_to_be_on_share, - add_rules, delete_rules, rules_to_be_removed_from_db, + context, share_instance, access_rules_on_share, + add_rules, delete_rules, update_rules, + rules_to_be_removed_from_db, share_server) self.process_driver_rule_updates( context, driver_rule_updates, share_instance_id) - # Update access rules that are still in 'applying' state + # Update access rules that are still in 'applying/updating' state conditionally_change = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ACTIVE, + constants.ACCESS_STATE_UPDATING: constants.ACCESS_STATE_ACTIVE, } self.get_and_update_share_instance_access_rules( context, share_instance_id=share_instance_id, @@ -353,6 +357,7 @@ def _update_access_rules(self, context, share_instance_id, conditionally_change_rule_state = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_DENYING: constants.ACCESS_STATE_ERROR, + constants.ACCESS_STATE_UPDATING: constants.ACCESS_STATE_ERROR, } self.get_and_update_share_instance_access_rules( context, share_instance_id=share_instance_id, @@ -387,6 +392,7 @@ def _update_access_rules(self, context, share_instance_id, def _update_rules_through_share_driver(self, context, share_instance, access_rules_to_be_on_share, add_rules, delete_rules, + update_rules, rules_to_be_removed_from_db, share_server): driver_rule_updates = {} @@ -395,6 +401,7 @@ def _update_rules_through_share_driver(self, 
context, share_instance, share_protocol == 'nfs'): add_rules = self._filter_ipv6_rules(add_rules) delete_rules = self._filter_ipv6_rules(delete_rules) + update_rules = self._filter_ipv6_rules(update_rules) access_rules_to_be_on_share = self._filter_ipv6_rules( access_rules_to_be_on_share) try: @@ -404,11 +411,14 @@ def _update_rules_through_share_driver(self, context, share_instance, access_rules_to_be_on_share, add_rules=add_rules, delete_rules=delete_rules, + update_rules=update_rules, share_server=share_server ) or {} except NotImplementedError: # NOTE(u_glide): Fallback to legacy allow_access/deny_access # for drivers without update_access() method support + # It is also possible that updating the access_level is not + # permitted. self._update_access_fallback(context, add_rules, delete_rules, rules_to_be_removed_from_db, share_instance, @@ -464,6 +474,7 @@ def process_driver_rule_updates(self, context, driver_rule_updates, conditional_state_updates = { constants.ACCESS_STATE_APPLYING: state, constants.ACCESS_STATE_DENYING: state, + constants.ACCESS_STATE_UPDATING: state, constants.ACCESS_STATE_ACTIVE: state, } else: @@ -501,10 +512,12 @@ def _filter_ipv6_rules(rules): def _get_rules_to_send_to_driver(self, context, share_instance): add_rules = [] delete_rules = [] + update_rules = [] access_filters = { 'state': (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_ACTIVE, - constants.ACCESS_STATE_DENYING), + constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_UPDATING), } existing_rules_in_db = self.get_and_update_share_instance_access_rules( context, filters=access_filters, @@ -516,11 +529,14 @@ def _get_rules_to_send_to_driver(self, context, share_instance): add_rules.append(rule) elif rule['state'] == constants.ACCESS_STATE_DENYING: delete_rules.append(rule) + elif rule['state'] == constants.ACCESS_STATE_UPDATING: + update_rules.append(rule) delete_rule_ids = [r['id'] for r in delete_rules] access_rules_to_be_on_share = [ r for r in 
existing_rules_in_db if r['id'] not in delete_rule_ids ] - return access_rules_to_be_on_share, add_rules, delete_rules + return (access_rules_to_be_on_share, add_rules, + delete_rules, update_rules) def _check_needs_refresh(self, context, share_instance_id): rules_to_apply_or_deny = ( @@ -567,13 +583,16 @@ def _update_and_get_unsynced_access_rules_from_db(self, context, share_instance_id): rule_filter = { 'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY, - constants.ACCESS_STATE_QUEUED_TO_DENY), + constants.ACCESS_STATE_QUEUED_TO_DENY, + constants.ACCESS_STATE_QUEUED_TO_UPDATE), } conditionally_change = { constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_QUEUED_TO_DENY: constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_QUEUED_TO_UPDATE: + constants.ACCESS_STATE_UPDATING, } rules_to_apply_or_deny = ( self.get_and_update_share_instance_access_rules( diff --git a/manila/share/api.py b/manila/share/api.py index f37dfa09d1..8bfc28b464 100644 --- a/manila/share/api.py +++ b/manila/share/api.py @@ -427,6 +427,10 @@ def create(self, context, share_proto, size, name, description, "(%(group)s).") % params raise exception.InvalidParameterValue(msg) + if share_type: + metadata = self.update_metadata_from_share_type_extra_specs( + context, share_type, metadata) + options = { 'size': size, 'user_id': context.user_id, @@ -515,6 +519,76 @@ def create(self, context, share_proto, size, name, description, return share + def update_metadata_from_share_type_extra_specs(self, context, share_type, + user_metadata): + extra_specs = share_type.get('extra_specs', {}) + if not extra_specs: + return user_metadata + + driver_keys = getattr(CONF, 'driver_updatable_metadata', []) + if not driver_keys: + return user_metadata + + metadata_from_share_type = {} + for k, v in extra_specs.items(): + try: + prefix, metadata_key = k.split(':') + except Exception: + continue + + # consider prefix only with valid storage driver + if prefix.lower() == 
'provisioning': + continue + + if metadata_key in driver_keys: + metadata_from_share_type.update({metadata_key: v}) + + metadata_from_share_type.update(user_metadata) + return metadata_from_share_type + + def update_share_from_metadata(self, context, share_id, metadata): + driver_keys = getattr(CONF, 'driver_updatable_metadata', []) + if not driver_keys: + return + + driver_metadata = {} + for k, v in metadata.items(): + if k in driver_keys: + driver_metadata.update({k: v}) + + if driver_metadata: + share = self.get(context, share_id) + self.share_rpcapi.update_share_from_metadata(context, share, + driver_metadata) + + def update_share_network_subnet_from_metadata(self, context, + share_network_id, + share_network_subnet_id, + metadata): + driver_keys = getattr(CONF, 'driver_updatable_subnet_metadata', []) + if not driver_keys: + return + + driver_metadata = {} + for k, v in metadata.items(): + if k in driver_keys: + driver_metadata.update({k: v}) + + if driver_metadata: + share_servers = ( + self.db.share_server_get_all_by_host_and_or_share_subnet( + context, + host=None, + share_subnet_id=share_network_subnet_id)) + for share_server in share_servers: + self.share_rpcapi.update_share_network_subnet_from_metadata( + context, + share_network_id, + share_network_subnet_id, + share_server, + driver_metadata + ) + def get_share_attributes_from_share_type(self, share_type): """Determine share attributes from the share type. 
@@ -2490,6 +2564,30 @@ def _conditionally_transition_share_instance_access_rules_status( context, conditionally_change=conditionally_change, share_instance_id=share_instance['id']) + def update_access(self, ctx, share, access, values): + + if self._any_invalid_share_instance(share, allow_on_error_state=True): + msg = _("Access rules cannot be updated while the share, " + "any of its replicas or migration copies lacks a valid " + "host or is in an invalid state.") + raise exception.InvalidShare(message=msg) + + access = self.db.share_access_update(ctx, access['id'], values) + for share_instance in share.instances: + self.update_access_to_instance(ctx, share_instance, access) + + return access + + def update_access_to_instance(self, context, share_instance, access): + self._conditionally_transition_share_instance_access_rules_status( + context, share_instance) + updates = {'state': constants.ACCESS_STATE_QUEUED_TO_UPDATE} + self.access_helper.get_and_update_share_instance_access_rule( + context, access['id'], updates=updates, + share_instance_id=share_instance['id']) + + self.share_rpcapi.update_access(context, share_instance) + def deny_access(self, ctx, share, access, allow_on_error_state=False): """Deny access to share.""" @@ -3164,14 +3262,15 @@ def share_server_migration_start( {'task_state': constants.TASK_STATE_MIGRATION_STARTING, 'status': constants.STATUS_SERVER_MIGRATING}) - share_snapshots = [ - self.db.share_snapshot_get_all_for_share(context, share['id']) - for share in shares] - snapshot_instance_ids = [] - for snapshot_list in share_snapshots: - for snapshot in snapshot_list: - snapshot_instance_ids.append(snapshot['instance']['id']) - share_instance_ids = [share['instance']['id'] for share in shares] + share_instances = self.db.share_instance_get_all_by_share_server( + context, share_server['id']) + share_instance_ids = [ + share_instance['id'] for share_instance in share_instances] + + snap_instances = 
self.db.share_snapshot_instance_get_all_with_filters( + context, {'share_instance_ids': share_instance_ids}) + snapshot_instance_ids = [ + snap_instance['id'] for snap_instance in snap_instances] # Updates all shares and snapshot instances self.db.share_and_snapshot_instances_status_update( diff --git a/manila/share/driver.py b/manila/share/driver.py index b5e97f83fe..ae0dc1435a 100644 --- a/manila/share/driver.py +++ b/manila/share/driver.py @@ -799,7 +799,7 @@ def deny_access(self, context, share, access, share_server=None): raise NotImplementedError() def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share. ``access_rules`` contains all access_rules that need to be on the @@ -840,6 +840,8 @@ def update_access(self, context, share, access_rules, add_rules, added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. + :param update_rules: Empty List or List of access rules which should be + updated. access_rules already contains these rules. :param share_server: None or Share server model :returns: None, or a dictionary of updates in the format:: @@ -3735,3 +3737,36 @@ def restore_backup_continue(self, context, backup, share_instance, :param share_server: share server in case of dhss_true """ raise NotImplementedError() + + def update_share_from_metadata(self, context, share, metadata, + share_server=None): + """Update the share from metadata. + + Driver must implement this method if it needs to perform some action + on given resource (i.e. share) based on provided metadata. + + :param context: The 'context.RequestContext' object for the request. + :param share: Share instance model with share data. + :param metadata: Dict contains key-value pair where driver will + perform necessary action based on key. 
+ :param share_server: Reference to the share server. + """ + raise NotImplementedError() + + def update_share_network_subnet_from_metadata(self, context, + share_network, + share_network_subnet, + share_server, metadata): + """Update the share network subnet from metadata. + + Driver must implement this method if it can perform some action on + given resource (i.e. share network subnet) based on provided metadata. + + :param context: The 'context.RequestContext' object for the request. + :param share_network: share network model + :param share_network_subnet: share network subnet model + :param share_server: share-server model. + :param metadata: Dict contains key-value pair where driver will + perform necessary action based on key. + """ + raise NotImplementedError() diff --git a/manila/share/drivers/cephfs/driver.py b/manila/share/drivers/cephfs/driver.py index f71d2d3d54..b2037f5d65 100644 --- a/manila/share/drivers/cephfs/driver.py +++ b/manila/share/drivers/cephfs/driver.py @@ -550,10 +550,10 @@ def delete_share(self, context, share, share_server=None): rados_command(self.rados_client, "fs subvolume rm", argdict) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): return self.protocol_helper.update_access( context, share, access_rules, add_rules, delete_rules, - share_server=share_server) + update_rules, share_server=share_server) def get_backend_info(self, context): return self.protocol_helper.get_backend_info(context) @@ -953,7 +953,7 @@ def _deny_access(self, context, share, access, share_server=None): rados_command(self.rados_client, "fs subvolume evict", argdict) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): access_updates = {} argdict = { @@ -1340,7 +1340,6 @@ def _allow_access(self, share, access): "pseudo": self._get_export_pseudo_path(share), 
"squash": "none", "security_label": True, - "protocols": [4], "fsal": { "name": "CEPH", "fs_name": self.volname, @@ -1368,7 +1367,7 @@ def _deny_access(self, share): rados_command(self.rados_client, "nfs export rm", argdict) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules of share. Creates an export per share. Modifies access rules of shares by diff --git a/manila/share/drivers/container/driver.py b/manila/share/drivers/container/driver.py index dc9ebd4308..78773241d5 100644 --- a/manila/share/drivers/container/driver.py +++ b/manila/share/drivers/container/driver.py @@ -190,7 +190,7 @@ def ensure_share(self, context, share, share_server=None): pass def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): server_id = self._get_container_name(share_server["id"]) share_name = self._get_share_name(share) LOG.debug("Updating access to share %(share)s at " diff --git a/manila/share/drivers/dell_emc/driver.py b/manila/share/drivers/dell_emc/driver.py index 660a54e35d..a898559b7b 100644 --- a/manila/share/drivers/dell_emc/driver.py +++ b/manila/share/drivers/dell_emc/driver.py @@ -257,7 +257,7 @@ def deny_access(self, context, share, access, share_server=None): self.plugin.deny_access(context, share, access, share_server) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access to the share.""" return self.plugin.update_access(context, share, access_rules, add_rules, delete_rules, share_server) diff --git a/manila/share/drivers/dell_emc/plugins/unity/connection.py b/manila/share/drivers/dell_emc/plugins/unity/connection.py index baff0addb9..6ed31c1836 100644 --- a/manila/share/drivers/dell_emc/plugins/unity/connection.py +++ 
b/manila/share/drivers/dell_emc/plugins/unity/connection.py @@ -44,9 +44,10 @@ 8.0.0 - Supports manage/unmanage share server/share/snapshot 9.0.0 - Implements default filter function 9.0.1 - Bugfix: remove enable ace process when creating cifs share + 9.0.2 - Bugfix: fix the driver startup issue with LACP ports configured """ -VERSION = "9.0.1" +VERSION = "9.0.2" LOG = log.getLogger(__name__) SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan') diff --git a/manila/share/drivers/dell_emc/plugins/unity/utils.py b/manila/share/drivers/dell_emc/plugins/unity/utils.py index 433918906d..efb76f896b 100644 --- a/manila/share/drivers/dell_emc/plugins/unity/utils.py +++ b/manila/share/drivers/dell_emc/plugins/unity/utils.py @@ -61,7 +61,12 @@ def match_ports(ports_list, port_ids_conf): port_id = port.get_id() for pattern in patterns: if fnmatch.fnmatchcase(port_id, pattern): - sp_id = port.parent_storage_processor.get_id() + # parentStorageProcessor property is deprecated in Unity 5.x + if port.parent_storage_processor: + sp = port.parent_storage_processor + else: + sp = port.storage_processor + sp_id = sp.get_id() ports_set = sp_ports_map.setdefault(sp_id, set()) ports_set.add(port_id) break diff --git a/manila/share/drivers/ganesha/__init__.py b/manila/share/drivers/ganesha/__init__.py index f73939af45..e11a27cca7 100644 --- a/manila/share/drivers/ganesha/__init__.py +++ b/manila/share/drivers/ganesha/__init__.py @@ -50,7 +50,7 @@ def init_helper(self): @abc.abstractmethod def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules of share.""" def get_backend_info(self, context): @@ -161,7 +161,7 @@ def _deny_access(self, base_path, share, access): self.ganesha.remove_export("%s--%s" % (share['name'], access['id'])) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): 
"""Update access rules of share.""" rule_state_map = {} if not (add_rules or delete_rules): @@ -232,7 +232,7 @@ def _get_export_pseudo_path(self, share): raise NotImplementedError() def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules of share. Creates an export per share. Modifies access rules of shares by diff --git a/manila/share/drivers/generic.py b/manila/share/drivers/generic.py index d7f08a0586..e8f29c6ed6 100644 --- a/manila/share/drivers/generic.py +++ b/manila/share/drivers/generic.py @@ -286,7 +286,7 @@ def _is_device_mounted(self, mount_path, server_details, volume=None): return True return False - def _add_mount_permanently(self, share_id, server_details): + def _add_mount_permanently(self, share_id, device_path, server_details): """Add mount permanently for mounted filesystems.""" try: self._ssh_exec( @@ -294,6 +294,17 @@ def _add_mount_permanently(self, share_id, server_details): ['grep', share_id, const.MOUNT_FILE_TEMP, '|', 'sudo', 'tee', '-a', const.MOUNT_FILE], ) + output, __ = self._ssh_exec( + server_details, + ['lsblk', '-o', 'uuid', '-n', device_path]) + if output: + device_uuid = f"UUID={output.strip()}" + self._ssh_exec( + server_details, + ['sudo', 'sed', '-i', "s@{}@{}@".format(device_path, + device_uuid), + const.MOUNT_FILE] + ) except exception.ProcessExecutionError as e: LOG.error("Failed to add 'Share-%(share_id)s' mount " "permanently on server '%(instance_id)s'.", @@ -363,7 +374,8 @@ def _mount_device_with_lock(): '&&', 'sudo', 'mount', device_path, mount_path, ) self._ssh_exec(server_details, mount_cmd) - self._add_mount_permanently(share.id, server_details) + self._add_mount_permanently(share.id, device_path, + server_details) else: LOG.warning("Mount point '%(path)s' already exists on " "server '%(server)s'.", log_data) @@ -852,7 +864,7 @@ def ensure_share(self, context, share, share_server=None): 
@ensure_server def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share. This driver has two different behaviors according to parameters: @@ -873,6 +885,8 @@ def update_access(self, context, share, access_rules, add_rules, added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. + :param update_rules: Empty List or List of access rules which should be + updated. access_rules already contains these rules. :param share_server: None or Share server model """ self._get_helper(share).update_access(share_server['backend_details'], diff --git a/manila/share/drivers/glusterfs/layout.py b/manila/share/drivers/glusterfs/layout.py index 055f5f1a4a..e3a258dc35 100644 --- a/manila/share/drivers/glusterfs/layout.py +++ b/manila/share/drivers/glusterfs/layout.py @@ -115,7 +115,7 @@ def validator(rule): return validator def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share. Driver supports 2 different cases in this method: @@ -125,9 +125,10 @@ def update_access(self, context, share, access_rules, add_rules, This recovery is made at driver start up. 2. Adding/Deleting of several access rules - 'access_rules' contains - all access_rules, 'add_rules' and 'delete_rules' contain rules which - should be added/deleted. Driver can ignore rules in 'access_rules' and - apply only rules from 'add_rules' and 'delete_rules'. + all access_rules, 'add_rules' and 'delete_rules' and 'update_rules' + contain rules which should be added/deleted/updated. Driver can + ignore rules in 'access_rules' and apply only rules from 'add_rules', + 'delete_rules' and 'update_rules'. 
""" gluster_mgr = self.layout._share_manager(share) diff --git a/manila/share/drivers/helpers.py b/manila/share/drivers/helpers.py index 12e0de8457..9d68653e78 100644 --- a/manila/share/drivers/helpers.py +++ b/manila/share/drivers/helpers.py @@ -339,6 +339,8 @@ def get_host_list(output, local_path): items = line.split(' ') if local_path == items[0]: entries.append(items[1]) + # exportfs may print "" instead of "*" for host + entries = ["*" if item == "" else item for item in entries] return entries def _sync_nfs_temp_and_perm_files(self, server): diff --git a/manila/share/drivers/hitachi/hnas/driver.py b/manila/share/drivers/hitachi/hnas/driver.py index 193fc7d61d..f598163487 100644 --- a/manila/share/drivers/hitachi/hnas/driver.py +++ b/manila/share/drivers/hitachi/hnas/driver.py @@ -157,7 +157,7 @@ def __init__(self, *args, **kwargs): job_timeout) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share. :param context: The `context.RequestContext` object for the request @@ -167,6 +167,8 @@ def update_access(self, context, share, access_rules, add_rules, added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. + :param update_rules: Empty List or List of access rules which should be + updated. access_rules already contains these rules. :param share_server: Data structure with share server information. Not used by this driver. 
""" diff --git a/manila/share/drivers/hitachi/hsp/driver.py b/manila/share/drivers/hitachi/hsp/driver.py index bd227b7f0c..a96f7574d6 100644 --- a/manila/share/drivers/hitachi/hsp/driver.py +++ b/manila/share/drivers/hitachi/hsp/driver.py @@ -167,7 +167,7 @@ def delete_share(self, context, share, share_server=None): {'shr': share['id']}) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): LOG.debug("Updating access rules for share: %(shr)s.", {'shr': share['id']}) diff --git a/manila/share/drivers/hpe/hpe_3par_driver.py b/manila/share/drivers/hpe/hpe_3par_driver.py index 46b2a5533a..999ff1ffd6 100644 --- a/manila/share/drivers/hpe/hpe_3par_driver.py +++ b/manila/share/drivers/hpe/hpe_3par_driver.py @@ -552,7 +552,7 @@ def ensure_share(self, context, share, share_server=None): pass def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access to the share.""" extra_specs = None if 'NFS' == share['share_proto']: # Avoiding DB call otherwise diff --git a/manila/share/drivers/huawei/base.py b/manila/share/drivers/huawei/base.py index 79fac2a71b..2c5342bd8c 100644 --- a/manila/share/drivers/huawei/base.py +++ b/manila/share/drivers/huawei/base.py @@ -54,7 +54,7 @@ def ensure_share(self, share, share_server=None): @abc.abstractmethod def update_access(self, share, access_rules, add_rules, - delete_rules, share_server): + delete_rules, update_rules, share_server): """Update access rules list.""" @abc.abstractmethod diff --git a/manila/share/drivers/huawei/huawei_nas.py b/manila/share/drivers/huawei/huawei_nas.py index 9218fac7f8..70bae8d8a3 100644 --- a/manila/share/drivers/huawei/huawei_nas.py +++ b/manila/share/drivers/huawei/huawei_nas.py @@ -163,11 +163,11 @@ def deny_access(self, context, share, access, share_server=None): self.plugin.deny_access(share, access, 
share_server) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules list.""" LOG.debug("Update access.") - self.plugin.update_access(share, access_rules, - add_rules, delete_rules, share_server) + self.plugin.update_access(share, access_rules, add_rules, + delete_rules, update_rules, share_server) def get_pool(self, share): """Return pool name where the share resides on.""" diff --git a/manila/share/drivers/huawei/v3/connection.py b/manila/share/drivers/huawei/v3/connection.py index fb3c7b1a8e..6dcd93f340 100644 --- a/manila/share/drivers/huawei/v3/connection.py +++ b/manila/share/drivers/huawei/v3/connection.py @@ -809,7 +809,7 @@ def clear_access(self, share, share_server=None): share_proto) def update_access(self, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules list.""" if not (add_rules or delete_rules): self.clear_access(share, share_server) @@ -1789,7 +1789,8 @@ def promote_replica(self, context, replica_list, replica, access_rules, cleared_old_active_access = True try: - self.update_access(replica, access_rules, [], [], share_server) + self.update_access(replica, access_rules, + [], [], [], share_server) except Exception: LOG.warning('Failed to set access rules to ' 'new active replica %s.', diff --git a/manila/share/drivers/ibm/gpfs.py b/manila/share/drivers/ibm/gpfs.py index 74905e5b0d..a313dda76b 100644 --- a/manila/share/drivers/ibm/gpfs.py +++ b/manila/share/drivers/ibm/gpfs.py @@ -512,7 +512,7 @@ def ensure_share(self, ctx, share, share_server=None): """Ensure that storage are mounted and exported.""" def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share.""" helper = self._get_helper(share) location = 
self._get_share_path(share) diff --git a/manila/share/drivers/infinidat/infinibox.py b/manila/share/drivers/infinidat/infinibox.py index 81eed719ea..d0afa4c5fb 100644 --- a/manila/share/drivers/infinidat/infinibox.py +++ b/manila/share/drivers/infinidat/infinibox.py @@ -543,7 +543,7 @@ def get_backend_info(self, context): } def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): # As the Infinibox API can bulk update export access rules, we will try # to use the access_rules list self._verify_share_protocol(share) diff --git a/manila/share/drivers/infortrend/driver.py b/manila/share/drivers/infortrend/driver.py index 6459c5ebba..c5639dcd9e 100644 --- a/manila/share/drivers/infortrend/driver.py +++ b/manila/share/drivers/infortrend/driver.py @@ -130,7 +130,7 @@ def _update_share_stats(self): super(InfortrendNASDriver, self)._update_share_stats(data) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share. :param context: Current context @@ -140,6 +140,8 @@ def update_access(self, context, share, access_rules, add_rules, added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. + :param update_rules: Empty List or List of access rules which should be + updated. access_rules already contains these rules. :param share_server: Not used by this driver. 
:returns: None, or a dictionary of ``access_id``, ``access_key`` as diff --git a/manila/share/drivers/inspur/as13000/as13000_nas.py b/manila/share/drivers/inspur/as13000/as13000_nas.py index e135503b78..fdea65709e 100644 --- a/manila/share/drivers/inspur/as13000/as13000_nas.py +++ b/manila/share/drivers/inspur/as13000/as13000_nas.py @@ -472,7 +472,7 @@ def transfer_rule_to_client(proto, rule): @inspur_driver_debug_trace def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """update access of share""" pool, share_name, _, proto = self._get_share_instance_pnsp(share) share_path = self._generate_share_path(pool, share_name) diff --git a/manila/share/drivers/inspur/instorage/instorage.py b/manila/share/drivers/inspur/instorage/instorage.py index 3a1918063e..1ee8ee7ac0 100644 --- a/manila/share/drivers/inspur/instorage/instorage.py +++ b/manila/share/drivers/inspur/instorage/instorage.py @@ -222,7 +222,7 @@ def ensure_share(self, context, share, share_server=None): return self.assistant.get_export_locations(share_name, share_proto) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update the share instance's access rule.""" share_name = self.generate_share_name(share) share_proto = share['share_proto'] diff --git a/manila/share/drivers/lvm.py b/manila/share/drivers/lvm.py index 73bcf338bc..908dac68ba 100644 --- a/manila/share/drivers/lvm.py +++ b/manila/share/drivers/lvm.py @@ -367,7 +367,7 @@ def _delete_share(self, ctx, share): LOG.warning(exc) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share. 
This driver has two different behaviors according to parameters: @@ -388,6 +388,8 @@ def update_access(self, context, share, access_rules, add_rules, added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. + :param update_rules: Empty List or List of access rules which should be + updated. access_rules already contains these rules. :param share_server: None or Share server model """ share_export_location = self._get_mount_point_name(share) diff --git a/manila/share/drivers/macrosan/macrosan_nas.py b/manila/share/drivers/macrosan/macrosan_nas.py index 6e951467ee..0c74ff720a 100644 --- a/manila/share/drivers/macrosan/macrosan_nas.py +++ b/manila/share/drivers/macrosan/macrosan_nas.py @@ -145,7 +145,7 @@ def ensure_share(self, context, share, share_server=None): @debug_trace def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules list. :param context: Current context @@ -155,6 +155,8 @@ def update_access(self, context, share, access_rules, add_rules, added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. + :param update_rules: Empty List or List of access rules which should be + updated. access_rules already contains these rules. :param share_server: Not used by this driver. 
:returns: None, or a dictionary of ``access_id``, ``access_key`` as diff --git a/manila/share/drivers/maprfs/maprfs_native.py b/manila/share/drivers/maprfs/maprfs_native.py index 708afc9eed..c53098fd86 100644 --- a/manila/share/drivers/maprfs/maprfs_native.py +++ b/manila/share/drivers/maprfs/maprfs_native.py @@ -283,7 +283,7 @@ def delete_snapshot(self, context, snapshot, share_server=None): raise exception.MapRFSException(msg=msg) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share.""" for access in access_rules: if access['access_type'].lower() != 'user': diff --git a/manila/share/drivers/netapp/dataontap/client/client_cmode.py b/manila/share/drivers/netapp/dataontap/client/client_cmode.py index e1021170a7..7d9d6e6824 100644 --- a/manila/share/drivers/netapp/dataontap/client/client_cmode.py +++ b/manila/share/drivers/netapp/dataontap/client/client_cmode.py @@ -71,6 +71,7 @@ def _init_features(self): ontapi_1_20 = ontapi_version >= (1, 20) ontapi_1_2x = (1, 20) <= ontapi_version < (1, 30) ontapi_1_30 = ontapi_version >= (1, 30) + ontapi_1_100 = ontapi_version >= (1, 100) ontapi_1_110 = ontapi_version >= (1, 110) ontapi_1_120 = ontapi_version >= (1, 120) ontapi_1_140 = ontapi_version >= (1, 140) @@ -78,6 +79,7 @@ def _init_features(self): ontapi_1_180 = ontapi_version >= (1, 180) ontapi_1_191 = ontapi_version >= (1, 191) ontap_9_10 = self.get_system_version()['version-tuple'] >= (9, 10, 0) + ontap_9_10_1 = self.get_system_version()['version-tuple'] >= (9, 10, 1) self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20) self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x) @@ -102,6 +104,8 @@ def _init_features(self): self.features.add_feature('FLEXGROUP', supported=ontapi_1_180) self.features.add_feature('FLEXGROUP_FAN_OUT', supported=ontapi_1_191) self.features.add_feature('SVM_MIGRATE', 
supported=ontap_9_10) + self.features.add_feature('SNAPLOCK', supported=ontapi_1_100) + self.features.add_feature('UNIFIED_AGGR', supported=ontap_9_10_1) def _invoke_vserver_api(self, na_element, vserver): server = copy.copy(self.connection) @@ -1600,6 +1604,22 @@ def setup_security_services(self, security_services, vserver_client, 'Data ONTAP driver') raise exception.NetAppException(msg % security_service['type']) + @na_utils.trace + def update_showmount(self, showmount): + """Update show mount for vserver. """ + nfs_service_modify_arg = { + 'showmount': showmount + } + self.send_request('nfs-service-modify', nfs_service_modify_arg) + + @na_utils.trace + def update_pnfs(self, pnfs): + """Update pNFS for vserver. """ + nfs_service_modify_arg = { + 'is-nfsv41-pnfs-enabled': pnfs + } + self.send_request('nfs-service-modify', nfs_service_modify_arg) + @na_utils.trace def enable_nfs(self, versions, nfs_config=None): """Enables NFS on Vserver.""" @@ -2224,7 +2244,8 @@ def create_volume(self, aggregate_name, volume_name, size_gb, compression_enabled=False, max_files=None, snapshot_reserve=None, volume_type='rw', qos_policy_group=None, adaptive_qos_policy_group=None, - encrypt=False, mount_point_name=None, **options): + encrypt=False, mount_point_name=None, + snaplock_type=None, **options): """Creates a volume.""" if adaptive_qos_policy_group and not self.features.ADAPTIVE_QOS: msg = 'Adaptive QoS not supported on this backend ONTAP version.' 
@@ -2238,16 +2259,21 @@ def create_volume(self, aggregate_name, volume_name, size_gb, api_args.update(self._get_create_volume_api_args( volume_name, thin_provisioned, snapshot_policy, language, snapshot_reserve, volume_type, qos_policy_group, encrypt, - adaptive_qos_policy_group, mount_point_name)) + adaptive_qos_policy_group, mount_point_name, snaplock_type)) self.send_request('volume-create', api_args) - self.update_volume_efficiency_attributes(volume_name, - dedup_enabled, - compression_enabled) + efficiency_policy = options.get('efficiency_policy', None) + self.update_volume_efficiency_attributes( + volume_name, dedup_enabled, compression_enabled, + efficiency_policy=efficiency_policy + ) if max_files is not None: self.set_volume_max_files(volume_name, max_files) + if snaplock_type is not None: + self.set_snaplock_attributes(volume_name, **options) + @na_utils.trace def create_volume_async(self, aggregate_list, volume_name, size_gb, thin_provisioned=False, snapshot_policy=None, @@ -2255,7 +2281,7 @@ def create_volume_async(self, aggregate_list, volume_name, size_gb, volume_type='rw', qos_policy_group=None, encrypt=False, adaptive_qos_policy_group=None, auto_provisioned=False, mount_point_name=None, - **options): + snaplock_type=None, **options): """Creates a volume asynchronously.""" if adaptive_qos_policy_group and not self.features.ADAPTIVE_QOS: @@ -2274,7 +2300,7 @@ def create_volume_async(self, aggregate_list, volume_name, size_gb, api_args.update(self._get_create_volume_api_args( volume_name, thin_provisioned, snapshot_policy, language, snapshot_reserve, volume_type, qos_policy_group, encrypt, - adaptive_qos_policy_group, mount_point_name)) + adaptive_qos_policy_group, mount_point_name, snaplock_type)) result = self.send_request('volume-create-async', api_args) job_info = { @@ -2282,7 +2308,6 @@ def create_volume_async(self, aggregate_list, volume_name, size_gb, 'error-code': result.get_child_content('result-error-code'), 'error-message': 
result.get_child_content('result-error-message') } - return job_info def _get_create_volume_api_args(self, volume_name, thin_provisioned, @@ -2290,7 +2315,8 @@ def _get_create_volume_api_args(self, volume_name, thin_provisioned, snapshot_reserve, volume_type, qos_policy_group, encrypt, adaptive_qos_policy_group, - mount_point_name=None): + mount_point_name=None, + snaplock_type=None): api_args = { 'volume-type': volume_type, 'space-reserve': ('none' if thin_provisioned else 'volume'), @@ -2319,8 +2345,32 @@ def _get_create_volume_api_args(self, volume_name, thin_provisioned, else: api_args['encrypt'] = 'false' + if snaplock_type is not None: + api_args['snaplock-type'] = snaplock_type + return api_args + @na_utils.trace + def update_volume_snapshot_policy(self, volume_name, snapshot_policy): + """Set snapshot policy for the specified volume.""" + api_args = { + 'query': { + 'volume-attributes': { + 'volume-id-attributes': { + 'name': volume_name, + }, + }, + }, + 'attributes': { + 'volume-attributes': { + 'volume-snapshot-attributes': { + 'snapshot-policy': snapshot_policy, + }, + }, + }, + } + self.send_request('volume-modify-iter', api_args) + @na_utils.trace @manila_utils.retry(retry_param=exception.NetAppException, interval=3, @@ -2415,6 +2465,28 @@ def disable_compression_async(self, volume_name): } self.connection.send_request('sis-set-config-async', api_args) + @na_utils.trace + def apply_volume_efficiency_policy(self, volume_name, + efficiency_policy=None): + """Apply efficiency policy to FlexVol/FlexGroup volume.""" + if efficiency_policy: + api_args = { + 'path': f'/vol/{volume_name}', + 'policy-name': efficiency_policy + } + self.send_request('sis-set-config', api_args) + + @na_utils.trace + def apply_volume_efficiency_policy_async(self, volume_name, + efficiency_policy=None): + """Apply efficiency policy to FlexVol volume asynchronously.""" + if efficiency_policy: + api_args = { + 'path': f'/vol/{volume_name}', + 'policy-name': efficiency_policy + } + 
self.connection.send_request('sis-set-config-async', api_args) + @na_utils.trace def get_volume_efficiency_status(self, volume_name): """Get dedupe & compression status for a volume.""" @@ -2702,17 +2774,20 @@ def modify_volume(self, aggregate_name, volume_name, not hide_snapdir).lower() self.send_request('volume-modify-iter', api_args) - + efficiency_policy = options.get('efficiency_policy', None) # Efficiency options must be handled separately - self.update_volume_efficiency_attributes(volume_name, - dedup_enabled, - compression_enabled, - is_flexgroup=is_flexgroup) + self.update_volume_efficiency_attributes( + volume_name, dedup_enabled, compression_enabled, + is_flexgroup=is_flexgroup, efficiency_policy=efficiency_policy + ) + if self._is_snaplock_enabled_volume(volume_name): + self.set_snaplock_attributes(volume_name, **options) @na_utils.trace def update_volume_efficiency_attributes(self, volume_name, dedup_enabled, compression_enabled, - is_flexgroup=False): + is_flexgroup=False, + efficiency_policy=None): """Update dedupe & compression attributes to match desired values.""" efficiency_status = self.get_volume_efficiency_status(volume_name) @@ -2743,6 +2818,13 @@ def update_volume_efficiency_attributes(self, volume_name, dedup_enabled, else: self.disable_compression(volume_name) + if is_flexgroup: + self.apply_volume_efficiency_policy_async( + volume_name, efficiency_policy=efficiency_policy) + else: + self.apply_volume_efficiency_policy( + volume_name, efficiency_policy=efficiency_policy) + @na_utils.trace def volume_exists(self, volume_name): """Checks if volume exists.""" @@ -2947,6 +3029,9 @@ def get_volume(self, volume_name): 'size': None, 'size-used': None, }, + 'volume-snaplock-attributes': { + 'snaplock-type': None, + }, }, }, } @@ -2971,6 +3056,8 @@ def get_volume(self, volume_name): 'volume-qos-attributes') or netapp_api.NaElement('none') volume_space_attributes = volume_attributes.get_child_by_name( 'volume-space-attributes') or 
netapp_api.NaElement('none') + volume_snaplock_attributes = volume_attributes.get_child_by_name( + 'volume-snaplock-attributes') or netapp_api.NaElement('none') aggregate = volume_id_attributes.get_child_content( 'containing-aggregate-name') @@ -2998,7 +3085,9 @@ def get_volume(self, volume_name): 'qos-policy-group-name': volume_qos_attributes.get_child_content( 'policy-group-name'), 'style-extended': volume_id_attributes.get_child_content( - 'style-extended') + 'style-extended'), + 'snaplock-type': volume_snaplock_attributes.get_child_content( + 'snaplock-type') } return volume @@ -4081,9 +4170,15 @@ def get_aggregate(self, aggregate_name): }, } + if self.features.SNAPLOCK: + snaplock_attributes = {'is-snaplock': None, 'snaplock-type': None} + desired_attributes['aggr-attributes'][ + 'aggr-snaplock-attributes'] = snaplock_attributes try: - aggrs = self._get_aggregates(aggregate_names=[aggregate_name], - desired_attributes=desired_attributes) + aggrs = self._get_aggregates( + aggregate_names=[aggregate_name], + desired_attributes=desired_attributes + ) except netapp_api.NaApiError: msg = _('Failed to get info for aggregate %s.') LOG.exception(msg, aggregate_name) @@ -4097,16 +4192,24 @@ def get_aggregate(self, aggregate_name): 'aggr-raid-attributes') or netapp_api.NaElement('none') aggr_owner_attrs = aggr_attributes.get_child_by_name( 'aggr-ownership-attributes') or netapp_api.NaElement('none') + aggr_snaplock_attrs = aggr_attributes.get_child_by_name( + 'aggr-snaplock-attributes') or netapp_api.NaElement('none') aggregate = { 'name': aggr_attributes.get_child_content('aggregate-name'), 'raid-type': aggr_raid_attrs.get_child_content('raid-type'), 'is-hybrid': strutils.bool_from_string( - aggr_raid_attrs.get_child_content('is-hybrid')), + aggr_raid_attrs.get_child_content('is-hybrid') + ), 'is-home': (aggr_owner_attrs.get_child_content('owner-id') == - aggr_owner_attrs.get_child_content('home-id')) + aggr_owner_attrs.get_child_content('home-id')), + 'is-snaplock': 
aggr_snaplock_attrs.get_child_content( + 'is-snaplock', + ), + 'snaplock-type': aggr_snaplock_attrs.get_child_content( + 'snaplock-type', + ), } - return aggregate @na_utils.trace @@ -6334,3 +6437,145 @@ def list_volume_snapshots(self, volume_name, snapmirror_label=None, 'attributes-list') or netapp_api.NaElement('none') return [snapshot_info.get_child_content('name') for snapshot_info in attributes_list.get_children()] + + @na_utils.trace + def is_snaplock_compliance_clock_configured(self, node_name): + """Get the Snaplock compliance is configured for each node""" + api_args = {'node': node_name} + result = self.send_request('snaplock-get-node-compliance-clock', + api_args) + node_compliance_clock = result.get_child_by_name( + "snaplock-node-compliance-clock" + ) + if not node_compliance_clock: + raise exception.NetAppException( + "Compliance clock is not configured for node %s", + node_name, + ) + clock_info = node_compliance_clock.get_child_by_name( + "compliance-clock-info") + clock_fmt_value = clock_info.get_child_content( + "formatted-snaplock-compliance-clock") + return 'not configured' not in clock_fmt_value.lower() + + @na_utils.trace + def set_snaplock_attributes(self, volume_name, **options): + """Set the retention period for SnapLock enabled volume""" + api_args = {} + snaplock_attribute_mapping = { + 'snaplock_autocommit_period': 'autocommit-period', + 'snaplock_min_retention_period': 'minimum-retention-period', + 'snaplock_max_retention_period': 'maximum-retention-period', + 'snaplock_default_retention_period': 'default-retention-period', + } + for share_type_attr, na_api_attr in snaplock_attribute_mapping.items(): + if options.get(share_type_attr): + api_args[na_api_attr] = options.get(share_type_attr) + + if all(value is None for value in api_args.values()): + LOG.debug("All SnapLock attributes are None, doesn't" + " updated SnapLock attributes") + return + + api_args['volume'] = volume_name + default_retention_period = options.get( + 
'snaplock_default_retention_period' + ) + if default_retention_period and default_retention_period == "max": + api_args['default-retention-period'] = ( + api_args['maximum-retention-period'] + ) + elif default_retention_period and default_retention_period == "min": + api_args['default-retention-period'] = ( + api_args['minimum-retention-period'] + ) + self.send_request('volume-set-snaplock-attrs', api_args) + + @na_utils.trace + def _is_snaplock_enabled_volume(self, volume_name): + """Get whether volume is SnapLock enabled or disabled""" + vol_attr = self.get_volume(volume_name) + return vol_attr.get('snaplock-type') in ("compliance", "enterprise") + + @na_utils.trace + def get_vserver_aggr_snaplock_type(self, aggr_name): + """Get SnapLock type for vserver aggregate""" + api_args = { + 'query': { + 'show-aggregates': { + 'aggregate-name': aggr_name, + }, + }, + 'desired-attributes': { + 'show-aggregates': { + 'snaplock-type': None, + }, + }, + } + + if self.features.SNAPLOCK: + result = self.send_iter_request('vserver-show-aggr-get-iter', + api_args) + else: + return None + if result is not None and self._has_records(result): + attributes_list = result.get_child_by_name( + 'attributes-list') or netapp_api.NaElement('none') + vs_aggr_attributes = attributes_list.get_child_by_name( + 'show-aggregates') or netapp_api.NaElement('none') + return vs_aggr_attributes.get_child_content('snaplock-type') + + @na_utils.trace + def get_storage_failover_partner(self, node_name): + """Get the partner node of HA pair""" + api_args = {'node': node_name} + result = self.send_request('cf-get-partner', api_args) + partner_node = result.get_child_content("partner") + return partner_node + + @na_utils.trace + def get_migratable_data_lif_for_node(self, node): + """Get available LIFs that can be migrated to another node.""" + failover_policy = ['system-defined', 'sfo-partner-only'] + protocols = ['nfs', 'cifs'] + api_args = { + 'query': { + 'net-interface-info': { + 'failover-policy': 
'|'.join(failover_policy), + 'home-node': node, + 'data-protocols': { + 'data-protocol': '|'.join(protocols), + } + } + } + } + result = self.send_iter_request('net-interface-get-iter', api_args) + lif_info_list = result.get_child_by_name( + 'attributes-list') or netapp_api.NaElement('none') + return [lif_info.get_child_content('interface-name') for lif_info + in lif_info_list.get_children()] + + @na_utils.trace + def get_data_lif_details_for_nodes(self): + """Get the data LIF details for each node.""" + api_args = { + 'desired-attributes': { + 'data-lif-capacity-details-info': { + 'limit-for-node': None, + 'count-for-node': None, + 'node': None + }, + }, + } + result = self.send_iter_request('data-lif-capacity-details', api_args) + data_lif_info_list = result.get_child_by_name( + 'attributes-list') or netapp_api.NaElement('none') + data_lif_info = [] + for lif_info in data_lif_info_list.get_children(): + lif_info_node = { + 'limit-for-node': lif_info.get_child_content('limit-for-node'), + 'count-for-node': lif_info.get_child_content('count-for-node'), + 'node': lif_info.get_child_content('node'), + } + data_lif_info.append(lif_info_node) + return data_lif_info diff --git a/manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py b/manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py index 494925471a..45edfc3874 100644 --- a/manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py +++ b/manila/share/drivers/netapp/dataontap/client/client_cmode_rest.py @@ -122,6 +122,7 @@ def _init_features(self): self.features.add_feature('FLEXGROUP', supported=True) self.features.add_feature('FLEXGROUP_FAN_OUT', supported=True) self.features.add_feature('SVM_MIGRATE', supported=True) + self.features.add_feature('UNIFIED_AGGR', supported=True) def __getattr__(self, name): """If method is not implemented for REST, try to call the ZAPI.""" @@ -549,7 +550,7 @@ def get_aggregate(self, aggregate_name): return {} fields = 
('name,block_storage.primary.raid_type,' - 'block_storage.storage_type') + 'block_storage.storage_type,snaplock_type') try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], @@ -570,6 +571,9 @@ def get_aggregate(self, aggregate_name): aggr_attributes['block_storage']['primary']['raid_type'], 'is-hybrid': aggr_attributes['block_storage']['storage_type'] == 'hybrid', + 'snaplock-type': aggr_attributes.get('snaplock_type'), + 'is-snaplock': False if (aggr_attributes.get('snaplock_type') + == 'non_snaplock') else True } return aggregate @@ -857,7 +861,7 @@ def get_volume(self, volume_name): query = { 'name': volume_name, 'fields': 'aggregates.name,nas.path,name,svm.name,type,style,' - 'qos.policy.name,space.size,space.used' + 'qos.policy.name,space.size,space.used,snaplock.type' } result = self.send_request('/storage/volumes', 'get', query=query) @@ -891,7 +895,8 @@ def get_volume(self, volume_name): 'size-used': volume_infos.get('space', {}).get('used'), 'qos-policy-group-name': ( volume_infos.get('qos', {}).get('policy', {}).get('name')), - 'style-extended': volume_infos.get('style') + 'style-extended': volume_infos.get('style'), + 'snaplock-type': volume_infos.get('snaplock', {}).get('type'), } return volume @@ -953,7 +958,8 @@ def create_volume(self, aggregate_name, volume_name, size_gb, compression_enabled=False, max_files=None, snapshot_reserve=None, volume_type='rw', qos_policy_group=None, adaptive_qos_policy_group=None, - encrypt=False, mount_point_name=None, **options): + encrypt=False, mount_point_name=None, + snaplock_type=None, **options): """Creates a FlexVol volume synchronously.""" # NOTE(nahimsouza): In REST API, both FlexVol and FlexGroup volumes are @@ -967,14 +973,20 @@ def create_volume(self, aggregate_name, volume_name, size_gb, snapshot_reserve=snapshot_reserve, volume_type=volume_type, qos_policy_group=qos_policy_group, encrypt=encrypt, adaptive_qos_policy_group=adaptive_qos_policy_group, - mount_point_name=mount_point_name, 
**options) + mount_point_name=mount_point_name, snaplock_type=snaplock_type, + **options) + efficiency_policy = options.get('efficiency_policy', None) + self.update_volume_efficiency_attributes( + volume_name, dedup_enabled, compression_enabled, + efficiency_policy=efficiency_policy + ) - self.update_volume_efficiency_attributes(volume_name, - dedup_enabled, - compression_enabled) if max_files is not None: self.set_volume_max_files(volume_name, max_files) + if snaplock_type is not None: + self.set_snaplock_attributes(volume_name, **options) + @na_utils.trace def create_volume_async(self, aggregate_list, volume_name, size_gb, is_flexgroup=False, thin_provisioned=False, @@ -983,7 +995,7 @@ def create_volume_async(self, aggregate_list, volume_name, size_gb, volume_type='rw', qos_policy_group=None, encrypt=False, adaptive_qos_policy_group=None, auto_provisioned=False, mount_point_name=None, - **options): + snaplock_type=None, **options): """Creates FlexGroup/FlexVol volumes. If the parameter `is_flexgroup` is False, the creation process is @@ -1004,7 +1016,7 @@ def create_volume_async(self, aggregate_list, volume_name, size_gb, body.update(self._get_create_volume_body( volume_name, thin_provisioned, snapshot_policy, language, snapshot_reserve, volume_type, qos_policy_group, encrypt, - adaptive_qos_policy_group, mount_point_name)) + adaptive_qos_policy_group, mount_point_name, snaplock_type)) # NOTE(nahimsouza): When a volume is not a FlexGroup, volume creation # is made synchronously to replicate old ZAPI behavior. 
When ZAPI is @@ -1020,7 +1032,6 @@ def create_volume_async(self, aggregate_list, volume_name, size_gb, 'error-code': '', 'error-message': '' } - return job_info @na_utils.trace @@ -1028,7 +1039,7 @@ def _get_create_volume_body(self, volume_name, thin_provisioned, snapshot_policy, language, snapshot_reserve, volume_type, qos_policy_group, encrypt, adaptive_qos_policy_group, - mount_point_name=None): + mount_point_name, snaplock_type): """Builds the body to volume creation request.""" body = { @@ -1059,6 +1070,9 @@ def _get_create_volume_body(self, volume_name, thin_provisioned, else: body['encryption.enabled'] = 'false' + if snaplock_type is not None: + body['snaplock.type'] = snaplock_type + return body @na_utils.trace @@ -1110,10 +1124,23 @@ def get_volume_efficiency_status(self, volume_name): 'compression': compression, } + @na_utils.trace + def update_volume_snapshot_policy(self, volume_name, snapshot_policy): + """Set snapshot policy for the specified volume.""" + volume = self._get_volume_by_args(vol_name=volume_name) + uuid = volume['uuid'] + + body = { + 'snapshot_policy.name': snapshot_policy + } + # update snapshot policy + self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) + @na_utils.trace def update_volume_efficiency_attributes(self, volume_name, dedup_enabled, compression_enabled, - is_flexgroup=None): + is_flexgroup=False, + efficiency_policy=None): """Update dedupe & compression attributes to match desired values.""" efficiency_status = self.get_volume_efficiency_status(volume_name) @@ -1130,6 +1157,9 @@ def update_volume_efficiency_attributes(self, volume_name, dedup_enabled, elif not compression_enabled and efficiency_status['compression']: self.disable_compression_async(volume_name) + self.apply_volume_efficiency_policy( + volume_name, efficiency_policy=efficiency_policy) + @na_utils.trace def enable_dedupe_async(self, volume_name): """Enable deduplication on FlexVol/FlexGroup volume asynchronously.""" @@ -1181,6 +1211,21 @@ def 
disable_compression_async(self, volume_name): # update volume efficiency self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) + @na_utils.trace + def apply_volume_efficiency_policy(self, volume_name, + efficiency_policy=None): + if efficiency_policy: + """Apply volume efficiency policy to FlexVol""" + volume = self._get_volume_by_args(vol_name=volume_name) + uuid = volume['uuid'] + + body = { + 'efficiency': {'policy': efficiency_policy} + } + + # update volume efficiency policy only if policy_name is provided + self.send_request(f'/storage/volumes/{uuid}', 'patch', body=body) + @na_utils.trace def set_volume_max_files(self, volume_name, max_files): """Set share file limit.""" @@ -2494,12 +2539,15 @@ def modify_volume(self, aggregate_name, volume_name, self.send_request('/storage/volumes/' + volume['uuid'], 'patch', body=body) - + # Extract efficiency_policy from provisioning_options + efficiency_policy = options.get('efficiency_policy', None) # Efficiency options must be handled separately - self.update_volume_efficiency_attributes(volume_name, - dedup_enabled, - compression_enabled, - is_flexgroup=is_flexgroup) + self.update_volume_efficiency_attributes( + volume_name, dedup_enabled, compression_enabled, + is_flexgroup=is_flexgroup, efficiency_policy=efficiency_policy + ) + if self._is_snaplock_enabled_volume(volume_name): + self.set_snaplock_attributes(volume_name, **options) @na_utils.trace def start_volume_move(self, volume_name, vserver, destination_aggregate, @@ -4667,6 +4715,46 @@ def _add_port_to_broadcast_domain(self, node, port, domain, ipspace): } raise exception.NetAppException(msg % msg_args) + @na_utils.trace + def update_showmount(self, showmount): + """Update show mount for vserver. """ + # Get SVM UUID. 
+ query = { + 'name': self.vserver, + 'fields': 'uuid' + } + res = self.send_request('/svm/svms', 'get', query=query) + if not res.get('records'): + msg = _('Vserver %s not found.') % self.vserver + raise exception.NetAppException(msg) + svm_id = res.get('records')[0]['uuid'] + + body = { + 'showmount_enabled': showmount, + } + self.send_request(f'/protocols/nfs/services/{svm_id}', 'patch', + body=body) + + @na_utils.trace + def update_pnfs(self, pnfs): + """Update pNFS for vserver. """ + # Get SVM UUID. + query = { + 'name': self.vserver, + 'fields': 'uuid' + } + res = self.send_request('/svm/svms', 'get', query=query) + if not res.get('records'): + msg = _('Vserver %s not found.') % self.vserver + raise exception.NetAppException(msg) + svm_id = res.get('records')[0]['uuid'] + + body = { + 'protocol.v41_features.pnfs_enabled': pnfs, + } + self.send_request(f'/protocols/nfs/services/{svm_id}', 'patch', + body=body) + @na_utils.trace def enable_nfs(self, versions, nfs_config=None): """Enables NFS on Vserver.""" @@ -5455,3 +5543,95 @@ def list_volume_snapshots(self, volume_name, snapmirror_label=None, return [snapshot_info['name'] for snapshot_info in response['records']] + + @na_utils.trace + def is_snaplock_compliance_clock_configured(self, node_name): + """Get the SnapLock compliance clock is configured for each node""" + node_uuid = self._get_cluster_node_uuid(node_name) + response = self.send_request( + f'/storage/snaplock/compliance-clocks/{node_uuid}', + 'get' + ) + clock_fmt_value = response.get('time') + return 'not configured' not in clock_fmt_value.lower() + + @na_utils.trace + def set_snaplock_attributes(self, volume_name, **options): + """Set the retention period for SnapLock enabled volume""" + body = {} + snaplock_attribute_mapping = { + 'snaplock_autocommit_period': 'snaplock.autocommit_period', + 'snaplock_min_retention_period': 'snaplock.retention.minimum', + 'snaplock_max_retention_period': 'snaplock.retention.maximum', + 
'snaplock_default_retention_period': 'snaplock.retention.default', + } + for share_type_attr, na_api_attr in snaplock_attribute_mapping.items(): + if options.get(share_type_attr): + if share_type_attr == 'snaplock_default_retention_period': + default_retention_period = options.get( + 'snaplock_default_retention_period' + ) + if default_retention_period == "max": + options[share_type_attr] =\ + options.get('snaplock_max_retention_period') + elif default_retention_period == "min": + options[share_type_attr] = \ + options.get('snaplock_min_retention_period') + + body[na_api_attr] = utils.convert_time_duration_to_iso_format( + options.get(share_type_attr)) + + if all(value is None for value in body.values()): + LOG.debug("All SnapLock attributes are None, doesn't" + " updated SnapLock attributes") + return + + volume = self._get_volume_by_args(vol_name=volume_name) + uuid = volume['uuid'] + self.send_request(f'/storage/volumes/{uuid}', + 'patch', body=body) + + @na_utils.trace + def _is_snaplock_enabled_volume(self, volume_name): + """Get whether volume is SnapLock enabled or disabled""" + vol_attr = self.get_volume(volume_name) + return vol_attr.get('snaplock-type') in ("compliance", "enterprise") + + @na_utils.trace + def _get_cluster_node_uuid(self, node_name): + query = { + 'name': node_name + } + response = self.send_request('/cluster/nodes', + 'get', query=query) + return response.get('records')[0].get('uuid') + + @na_utils.trace + def get_storage_failover_partner(self, node_name): + """Get the partner node of HA pair""" + node_uuid = self._get_cluster_node_uuid(node_name) + node_details = self.send_request(f'/cluster/nodes/{node_uuid}', 'get') + return node_details['ha']['partners'][0]['name'] + + @na_utils.trace + def get_migratable_data_lif_for_node(self, node): + """Get available LIFs that can be migrated to another node.""" + protocols = ['data_nfs', 'data_cifs'] + query = { + 'services': '|'.join(protocols), + 'location.home_node.name': node, + 'fields': 
'name', + } + result = self.send_request('/network/ip/interfaces', 'get', + query=query) + migratable_lif = [] + if self._has_records(result): + result = result.get('records', []) + for lif in result: + lif_result = self.send_request( + f'/network/ip/interfaces/{lif.get("uuid")}', 'get' + ) + failover_policy = lif_result['location']['failover'] + if failover_policy in ('default', 'sfo_partners_only'): + migratable_lif.append(lif["name"]) + return migratable_lif diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py index 47b4aa6f93..268813ab7f 100644 --- a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py +++ b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_multi_svm.py @@ -110,9 +110,9 @@ def unmanage_snapshot_with_server(self, snapshot, share_server=None): self.library.unmanage_snapshot(snapshot, share_server=share_server) def update_access(self, context, share, access_rules, add_rules, - delete_rules, **kwargs): + delete_rules, update_rules, **kwargs): self.library.update_access(context, share, access_rules, add_rules, - delete_rules, **kwargs) + delete_rules, update_rules, **kwargs) def _update_share_stats(self, data=None): data = self.library.get_share_stats( @@ -398,3 +398,16 @@ def restore_backup_continue(self, context, backup, share, **kwargs): def delete_backup(self, context, backup, share, **kwargs): return self.library.delete_backup(context, backup, share, **kwargs) + + def update_share_from_metadata(self, context, share, metadata, + share_server=None): + self.library.update_share_from_metadata( + context, share, metadata, share_server=share_server) + + def update_share_network_subnet_from_metadata(self, context, + share_network, + share_network_subnet, + share_server, metadata): + self.library.update_share_network_subnet_from_metadata( + context, share_network, share_network_subnet, + share_server, metadata) diff --git 
a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py index c4d5602a26..eed8b2ab40 100644 --- a/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py +++ b/manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py @@ -99,9 +99,9 @@ def unmanage_snapshot_with_server(self, snapshot, share_server=None): raise NotImplementedError def update_access(self, context, share, access_rules, add_rules, - delete_rules, **kwargs): + delete_rules, update_rules, **kwargs): self.library.update_access(context, share, access_rules, add_rules, - delete_rules, **kwargs) + delete_rules, update_rules, **kwargs) def _update_share_stats(self, data=None): data = self.library.get_share_stats( @@ -362,3 +362,14 @@ def restore_backup_continue(self, context, backup, share, **kwargs): def delete_backup(self, context, backup, share, **kwargs): return self.library.delete_backup(context, backup, share, **kwargs) + + def update_share_from_metadata(self, context, share, metadata, + share_server=None): + self.library.update_share_from_metadata( + context, share, metadata, share_server=share_server) + + def update_share_network_subnet_from_metadata(self, context, + share_network, + share_network_subnet, + share_server, metadata): + raise NotImplementedError diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py index 6d5759fead..18830003fa 100644 --- a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py +++ b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py @@ -118,6 +118,16 @@ class NetAppCmodeFileStorageLibrary(object): 'netapp:fpolicy_extensions_to_exclude': 'fpolicy_extensions_to_exclude', 'netapp:fpolicy_file_operations': 'fpolicy_file_operations', + 'netapp:efficiency_policy': 'efficiency_policy', + 'netapp_snaplock_type': 'snaplock_type', + 
'netapp:snaplock_autocommit_period': + 'snaplock_autocommit_period', + 'netapp:snaplock_min_retention_period': + 'snaplock_min_retention_period', + 'netapp:snaplock_max_retention_period': + 'snaplock_max_retention_period', + 'netapp:snaplock_default_retention_period': + 'snaplock_default_retention_period', } # Maps standard extra spec keys to legacy NetApp keys @@ -153,6 +163,8 @@ class NetAppCmodeFileStorageLibrary(object): 'link', 'lookup', 'open', 'read', 'write', 'rename', 'rename_dir', 'setattr', 'symlink'] + SNAPLOCK_TYPE = ['compliance', 'enterprise'] + def __init__(self, driver_name, **kwargs): na_utils.validate_driver_instantiation(**kwargs) @@ -183,6 +195,7 @@ def __init__(self, driver_name, **kwargs): self._cache_pool_status = None self._flexgroup_pools = {} self._is_flexgroup_auto = False + self._is_snaplock_compliance_configured = False self._app_version = kwargs.get('app_version', 'unknown') @@ -202,6 +215,14 @@ def do_setup(self, context): self._have_cluster_creds = self._client.check_for_cluster_credentials() if self._have_cluster_creds is True: self._set_cluster_info() + # Set SnapLock compliance clock configured on both the nodes + nodes = self._client.list_cluster_nodes() + for node in nodes: + self._is_snaplock_compliance_configured = ( + self._client.is_snaplock_compliance_clock_configured(node) + ) + if not self._is_snaplock_compliance_configured: + break self._licenses = self._get_licenses() self._revert_to_snapshot_support = self._check_snaprestore_license() @@ -492,7 +513,6 @@ def _get_pools(self, get_filter_function=None, goodness_function=None): aggr_space, aggr_list) pool = self._get_pool(pool_name, total_gb, free_gb, used_gb) - cached_pools.append(pool) pool_with_func = copy.deepcopy(pool) pool_with_func['filter_function'] = filter_function @@ -516,7 +536,6 @@ def _get_pool(self, pool_name, total_capacity_gb, free_capacity_gb, netapp_flexvol_encryption = self._cluster_info.get( 'nve_support', False) - reserved_percentage = 
self.configuration.reserved_share_percentage reserved_snapshot_percentage = ( self.configuration.reserved_share_from_snapshot_percentage or @@ -551,7 +570,7 @@ def _get_pool(self, pool_name, total_capacity_gb, free_capacity_gb, 'revert_to_snapshot_support': self._revert_to_snapshot_support, 'security_service_update_support': True, 'share_server_multiple_subnet_support': True, - 'mount_point_name_support': True + 'mount_point_name_support': True, } # Add storage service catalog data. @@ -1125,6 +1144,9 @@ def _allocate_container(self, share, vserver, vserver_client, provisioning_options = self._get_provisioning_options_for_share( share, vserver, vserver_client=vserver_client, set_qos=set_qos) + if provisioning_options.get('snaplock_type'): + self._check_snaplock_compatibility() + if replica: # If this volume is intended to be a replication destination, # create it as the 'data-protection' type @@ -1137,6 +1159,7 @@ def _allocate_container(self, share, vserver, vserver_client, 'provisioning options %(options)s', {'share': share_name, 'pool': pool_name, 'options': provisioning_options}) + if self._is_flexgroup_pool(pool_name): aggr_list = self._get_flexgroup_aggregate_list(pool_name) self._create_flexgroup_share( @@ -1179,14 +1202,16 @@ def _apply_snapdir_visibility( def _create_flexgroup_share(self, vserver_client, aggr_list, share_name, size, snapshot_reserve, dedup_enabled=False, compression_enabled=False, max_files=None, - mount_point_name=None, **provisioning_options): + mount_point_name=None, snaplock_type=None, + **provisioning_options): """Create a FlexGroup share using async API with job.""" start_timeout = ( self.configuration.netapp_flexgroup_aggregate_not_busy_timeout) job_info = self.wait_for_start_create_flexgroup( start_timeout, vserver_client, aggr_list, share_name, size, - snapshot_reserve, mount_point_name, **provisioning_options) + snapshot_reserve, mount_point_name, snaplock_type, + **provisioning_options) if not job_info['jobid'] or 
job_info['error-code']: msg = "Error creating FlexGroup share: %s." @@ -1195,18 +1220,25 @@ def _create_flexgroup_share(self, vserver_client, aggr_list, share_name, timeout = self.configuration.netapp_flexgroup_volume_online_timeout self.wait_for_flexgroup_deployment(vserver_client, job_info['jobid'], timeout) + efficiency_policy = provisioning_options.get('efficiency_policy', None) vserver_client.update_volume_efficiency_attributes( - share_name, dedup_enabled, compression_enabled, is_flexgroup=True) + share_name, dedup_enabled, compression_enabled, is_flexgroup=True, + efficiency_policy=efficiency_policy) if max_files is not None: vserver_client.set_volume_max_files(share_name, max_files) + if snaplock_type is not None: + vserver_client.set_snaplock_attributes(share_name, + **provisioning_options) + @na_utils.trace def wait_for_start_create_flexgroup(self, start_timeout, vserver_client, aggr_list, share_name, size, snapshot_reserve, - mount_point_name=None, + mount_point_name, + snaplock_type, **provisioning_options): """Wait for starting create FlexGroup volume succeed. @@ -1221,6 +1253,7 @@ def wait_for_start_create_flexgroup(self, start_timeout, vserver_client, :param size: size to be provisioned. :param snapshot_reserve: snapshot reserve option. :param mount_point_name: junction_path_name. + :param snaplock_type: SnapLock type :param provisioning_options: other provision not required options. 
""" @@ -1237,6 +1270,7 @@ def _start_create_flexgroup_volume(): snapshot_reserve=snapshot_reserve, auto_provisioned=self._is_flexgroup_auto, mount_point_name=mount_point_name, + snaplock_type=snaplock_type, **provisioning_options) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as raise_ctxt: @@ -1324,12 +1358,25 @@ def _check_string_extra_specs_validity(self, share, extra_specs): self._check_fpolicy_file_operations( share, extra_specs['netapp:fpolicy_file_operations']) + # Validate extra_specs for SnapLock + snaplock_attributes = [ + 'netapp:snaplock_autocommit_period', + 'netapp:snaplock_min_retention_period', + 'netapp:snaplock_max_retention_period', + 'netapp:snaplock_default_retention_period' + ] + for attribute in snaplock_attributes: + if attribute in extra_specs: + self._check_snaplock_attributes(share, attribute, + extra_specs[attribute]) + @na_utils.trace def _check_if_max_files_is_valid(self, share, value): """Check if max_files has a valid value.""" if int(value) < 0: args = {'value': value, 'key': 'netapp:max_files', - 'type_id': share['share_type_id'], 'share_id': share['id']} + 'type_id': share['share_type_id'], + 'share_id': share['id']} msg = _('Invalid value "%(value)s" for extra_spec "%(key)s" ' 'in share_type %(type_id)s for share %(share_id)s.') raise exception.NetAppException(msg % args) @@ -1348,6 +1395,65 @@ def _check_fpolicy_file_operations(self, share, value): '%(share_id)s.') raise exception.NetAppException(msg % args) + @na_utils.trace + def _check_snaplock_attributes(self, share, key, value): + """Validate the SnapLock retention periods""" + valid_units_for_period = ["minutes", "hours", "days", + "months", "years"] + pattern = re.compile(r'^\d+\s*(minutes|hours|days|months|years)$') + common_msg = ("a number followed suffix, valid suffix are: " + f"{valid_units_for_period}. 
For example, a value" + f" if '2hours' represents a {key}" + " of 2 hours.") + + if key == 'netapp:snaplock_autocommit_period': + is_matched = pattern.match(value) + extra_msg = (f"The value of the {key} should be" + f" {common_msg} ") + if not is_matched: + self._raise_snaplock_exception(share, key, value, extra_msg) + elif (key == 'netapp:snaplock_min_retention_period' + or key == 'netapp:snaplock_max_retention_period'): + is_matched = pattern.match(value) or value == "infinite" + extra_msg = (f"The value of the {key} should be " + f"'infinite' or {common_msg}") + if not is_matched: + self._raise_snaplock_exception(share, key, value, extra_msg) + elif key == 'netapp:snaplock_default_retention_period': + is_matched = (pattern.match(value) or value == "infinite" + or value == "min" or value == "max") + extra_msg = (f"The value of the {key} should be " + f"'infinite', 'min', 'max', or {common_msg}") + if not is_matched: + self._raise_snaplock_exception(share, key, value, extra_msg) + + def _raise_snaplock_exception(self, share, key, value, extra_msg): + args = {'value': value, + 'extra_spec': key, + 'type_id': share['share_type_id'], + 'share_id': share['id'], + 'extra_msg': extra_msg} + msg = _('Invalid value "%(value)s" for extra_spec ' + '"%(extra_spec)s" in share_type %(type_id)s for share ' + '%(share_id)s. 
%(extra_msg)s')
+        raise exception.NetAppException(msg % args)
+
+    @na_utils.trace
+    def _check_snaplock_compatibility(self):
+        """Check SnapLock license and compliance clock sync with the nodes"""
+        # Check SnapLock license is enabled on cluster
+        if self._have_cluster_creds:
+            if 'snaplock' not in self._licenses:
+                raise exception.NetAppException("SnapLock License is not"
+                                                " available on ONTAP")
+            if not self._is_snaplock_compliance_configured:
+                msg = _('Compliance clock is not configured for one'
+                        ' of the nodes.')
+                raise exception.NetAppException(msg)
+        else:
+            LOG.warning("Unable to verify if SnapLock is enabled for"
+                        " the cluster.")
+
     @na_utils.trace
     def _check_boolean_extra_specs_validity(self, share, specs,
                                             keys_of_interest):
@@ -1360,7 +1466,8 @@ def _check_boolean_extra_specs_validity(self, share, specs,
                            'netapp:compression': compression}
             type_id = share['share_type_id']
             share_id = share['id']
-            args = {'type_id': type_id, 'share_id': share_id, 'spec': spec}
+            args = {'type_id': type_id, 'share_id': share_id,
+                    'spec': spec}
             msg = _('Invalid combination of extra_specs in share_type '
                     '%(type_id)s for share %(share_id)s: %(spec)s: '
                     'deduplication must be enabled in order for '
@@ -2376,7 +2483,7 @@ def shrink_share(self, share, new_size, share_server=None):

     @na_utils.trace
     def update_access(self, context, share, access_rules, add_rules,
-                      delete_rules, share_server=None):
+                      delete_rules, update_rules, share_server=None):
         """Updates access rules for a share."""

         # NOTE(felipe_rodrigues): do not add export rules to a non-active
@@ -2448,30 +2555,51 @@ def _update_ssc_info(self):
                 'netapp_flexgroup': True,
             }

+        # Add the SnapLock info for FlexVol
+        for aggr_name in aggregate_names:
+            if self._client.features.UNIFIED_AGGR:
+                snaplock_dict = {'netapp_snaplock_type': self.SNAPLOCK_TYPE}
+            else:
+                snaplock_dict = {
+                    'netapp_snaplock_type':
+                        self._get_aggregate_snaplock_type(aggr_name)
+                }
+            ssc_stats[aggr_name].update(snaplock_dict)
+
         # Add aggregate specs for 
pools aggr_set = set(aggregate_names).union(self._get_flexgroup_aggr_set()) if self._have_cluster_creds and aggr_set: aggr_info = self._get_aggregate_info(aggr_set) # FlexVol pools + aggr_info_flexvol = copy.deepcopy(aggr_info) for aggr_name in aggregate_names: - ssc_stats[aggr_name].update(aggr_info[aggr_name]) + if self._client.features.UNIFIED_AGGR: + aggr_info_flexvol[aggr_name]['netapp_snaplock_type'] = \ + self.SNAPLOCK_TYPE + ssc_stats[aggr_name].update(aggr_info_flexvol[aggr_name]) # FlexGroup pools for pool_name, aggr_list in flexgroup_pools.items(): raid_type = set() hybrid = set() disk_type = set() + snaplock_type = set() for aggr in aggr_list: raid_type.add(aggr_info[aggr]['netapp_raid_type']) hybrid.add(aggr_info[aggr]['netapp_hybrid_aggregate']) disk_type = disk_type.union( aggr_info[aggr]['netapp_disk_type']) + snaplock_type.add(aggr_info[aggr]['netapp_snaplock_type']) ssc_stats[pool_name].update({ 'netapp_raid_type': " ".join(sorted(raid_type)), 'netapp_hybrid_aggregate': " ".join(sorted(hybrid)), 'netapp_disk_type': sorted(list(disk_type)), + 'netapp_snaplock_type': self.SNAPLOCK_TYPE + if self._client.features.UNIFIED_AGGR + else " ".join(sorted(snaplock_type)), + }) self._ssc_stats = ssc_stats @@ -2495,6 +2623,7 @@ def _get_aggregate_info(self, aggregate_names): 'netapp_hybrid_aggregate': hybrid, 'netapp_disk_type': disk_types, 'netapp_is_home': aggregate.get('is-home'), + 'netapp_snaplock_type': aggregate.get('snaplock-type'), } return aggr_info @@ -3391,6 +3520,17 @@ def migration_check_compatibility(self, context, source_share, "pool is FlexGroup type.") raise exception.NetAppException(msg) + # Check the source/destination pool SnapLock type, for + # ONTAP version < 9.10.1 + if not self._is_snaplock_compatible_for_migration( + source_pool, + destination_aggregate + ): + msg = _("Cannot migrate share because the source and " + "destination pool support different SnapLock" + " type.") + raise exception.NetAppException(msg) + # Validate new 
extra-specs are valid on the destination extra_specs = share_types.get_extra_specs_from_share( destination_share) @@ -5013,3 +5153,88 @@ def _resource_cleanup_for_backup(self, backup, share_instance, # Delete Vserver if share_server is not None: self._delete_backup_vserver(backup, des_vserver) + + @na_utils.trace + def update_volume_snapshot_policy(self, share, snapshot_policy, + share_server=None): + share_name = self._get_backend_share_name(share['id']) + _, vserver_client = self._get_vserver(share_server=share_server) + vserver_client.update_volume_snapshot_policy(share_name, + snapshot_policy) + + @na_utils.trace + def update_showmount(self, showmount, share_server=None): + showmount = showmount.lower() + if showmount not in ('true', 'false'): + err_msg = _("Invalid showmount value supplied: %s.") % showmount + raise exception.NetAppException(err_msg) + + vserver, vserver_client = self._get_vserver(share_server=share_server) + vserver_client.update_showmount(showmount) + + def update_pnfs(self, pnfs, share_server=None): + pnfs = pnfs.lower() + if pnfs not in ('true', 'false'): + err_msg = _("Invalid pnfs value supplied: %s.") % pnfs + raise exception.NetAppException(err_msg) + + vserver, vserver_client = self._get_vserver(share_server=share_server) + vserver_client.update_pnfs(pnfs) + + @na_utils.trace + def update_share_from_metadata(self, context, share, metadata, + share_server=None): + metadata_update_func_map = { + "snapshot_policy": "update_volume_snapshot_policy", + } + + for k, v in metadata.items(): + metadata_update_method = ( + getattr(self, metadata_update_func_map.get(k)) + if k in metadata_update_func_map.keys() else None) + + if metadata_update_method: + metadata_update_method(share, v, share_server=share_server) + + def update_share_network_subnet_from_metadata(self, context, + share_network, + share_network_subnet, + share_server, metadata): + metadata_update_func_map = { + "showmount": "update_showmount", + "pnfs": "update_pnfs", + } + + for 
k, v in metadata.items(): + metadata_update_method = ( + getattr(self, metadata_update_func_map.get(k)) + if k in metadata_update_func_map.keys() else None) + + if metadata_update_method: + metadata_update_method(v, share_server=share_server) + + @na_utils.trace + def _get_aggregate_snaplock_type(self, aggr_name): + if self._have_cluster_creds: + aggr_attributes = self._client.get_aggregate(aggr_name) + snaplock_type = aggr_attributes.get('snaplock-type') + + else: + snaplock_type = self._client.get_vserver_aggr_snaplock_type( + aggr_name, + ) + return snaplock_type + + @na_utils.trace + def _is_snaplock_compatible_for_migration(self, source_pool, des_pool): + if self._client.features.UNIFIED_AGGR: + return True + if (self.configuration.netapp_use_legacy_client + and self._client.features.SNAPLOCK): + source_snaplock_type = self._ssc_stats.get(source_pool, {}).get( + 'netapp_snaplock_type') + des_snaplock_type = self._ssc_stats.get(des_pool, {}).get( + 'netapp_snaplock_type') + if source_snaplock_type != des_snaplock_type: + return False + return True diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py index 98ec94c34a..fa8a18ca93 100644 --- a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py +++ b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py @@ -225,6 +225,10 @@ def setup_server_with_lock(): if self.is_nfs_config_supported: server_details['nfs_config'] = jsonutils.dumps(nfs_config) + if self.configuration.netapp_restrict_lif_creation_per_ha_pair: + self._check_data_lif_count_limit_reached_for_ha_pair( + self._client + ) try: self._create_vserver(vserver_name, network_info, metadata, nfs_config=nfs_config) @@ -1224,6 +1228,12 @@ def _check_compatibility_for_svm_migrate( 'neutron_subnet_id') } + # Check the LIF creation on destination cluster when + # 'netapp_restrict_lif_creation_per_ha_pair' option is set + # to true. 
+ if self.configuration.netapp_restrict_lif_creation_per_ha_pair: + self._check_data_lif_count_limit_reached_for_ha_pair(dest_client) + # 2. Create new ipspace, port and broadcast domain. node_name = self._client.list_cluster_nodes()[0] port = self._get_node_data_port(node_name) @@ -2427,3 +2437,40 @@ def _delete_backup_vserver(self, backup, des_vserver): with excutils.save_and_reraise_exception() as exc_context: if 'has shares' in e.msg: exc_context.reraise = False + + def _check_data_lif_count_limit_reached_for_ha_pair(self, client): + ha_pair = {node: client.get_storage_failover_partner(node) + for node in client.list_cluster_nodes()} + # TODO(agireesh): Get the data LIFs details for node using REST call + # The 'get_data_lif_details_for_nodes' method is missing for REST + # workflow because there is no REST available to retrieve the data + # LIF's capacity and details for the nodes. Filed the RFE on ONTAP + # to implement the corresponding REST, and once it is available, the + # REST workflow will be added as part of the fix (bug #2100673). 
+ lif_info_for_node = client.get_data_lif_details_for_nodes() + lif_info_dict = {info['node']: info for info in lif_info_for_node} + + for node, ha_partner in ha_pair.items(): + if node in lif_info_dict: + data_lif_count = int(lif_info_dict[node].get( + 'count-for-node', 0) + ) + lif_limit_for_node = int(lif_info_dict[node].get( + 'limit-for-node') + ) + migratable_data_lifs = ( + client.get_migratable_data_lif_for_node(ha_partner) + ) + expected_lif_count_after_failover = ( + data_lif_count + len(migratable_data_lifs) + ) + if expected_lif_count_after_failover > lif_limit_for_node: + msg_args = { + 'data_lif': expected_lif_count_after_failover, + 'lif_limit': lif_limit_for_node, + } + msg = _("If a partner node fails, the number of data LIFs" + " {%(data_lif)s} will exceed the node's maximum " + "data LIF limit {%(lif_limit)s}") % msg_args + LOG.error(msg) + raise exception.NetAppException(msg) diff --git a/manila/share/drivers/netapp/options.py b/manila/share/drivers/netapp/options.py index deadcb4995..e405104af3 100644 --- a/manila/share/drivers/netapp/options.py +++ b/manila/share/drivers/netapp/options.py @@ -198,6 +198,14 @@ 'certificate created during the vserver creation. This ' 'option only applies when the option ' 'driver_handles_share_servers is set to True.'), + cfg.BoolOpt('netapp_restrict_lif_creation_per_ha_pair', + default=False, + help='Prevent the creation of a share server if total number' + ' of data LIFs on one node of HA pair, including those' + ' that can be migrated in case of failure, exceeds the ' + 'maximum data LIFs supported by the node. 
This option ' + 'guarantees that, in the event of a node failure, the' + ' partner node will be able to takeover all data LIFs.') ] netapp_cluster_opts = [ diff --git a/manila/share/drivers/nexenta/ns4/nexenta_nas.py b/manila/share/drivers/nexenta/ns4/nexenta_nas.py index 650489e15e..2a23c96ad1 100644 --- a/manila/share/drivers/nexenta/ns4/nexenta_nas.py +++ b/manila/share/drivers/nexenta/ns4/nexenta_nas.py @@ -108,19 +108,21 @@ def delete_snapshot(self, context, snapshot, share_server=None): self.helper.delete_snapshot(snapshot['share_name'], snapshot['name']) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share. :param context: The `context.RequestContext` object for the request :param share: Share that will have its access rules updated. :param access_rules: All access rules for given share. This list - is enough to update the access rules for given share. + is enough to update the access rules for given share. :param add_rules: Empty List or List of access rules which should be - added. access_rules already contains these rules. Not used by this - driver. + added. access_rules already contains these rules. Not used + by this driver. :param delete_rules: Empty List or List of access rules which should be - removed. access_rules doesn't contain these rules. Not used by - this driver. + removed. access_rules doesn't contain these rules. Not used by + this driver. + :param update_rules: Empty List or List of access rules which should be + updated. access_rules already contains these rules. :param share_server: Data structure with share server information. Not used by this driver. 
""" diff --git a/manila/share/drivers/nexenta/ns5/nexenta_nas.py b/manila/share/drivers/nexenta/ns5/nexenta_nas.py index fcb05aaaf0..2a02466745 100644 --- a/manila/share/drivers/nexenta/ns5/nexenta_nas.py +++ b/manila/share/drivers/nexenta/ns5/nexenta_nas.py @@ -392,20 +392,22 @@ def manage_existing(self, share, driver_options): }]} def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share. Using access_rules list for both adding and deleting rules. :param context: The `context.RequestContext` object for the request :param share: Share that will have its access rules updated. :param access_rules: All access rules for given share. This list - is enough to update the access rules for given share. + is enough to update the access rules for given share. :param add_rules: Empty List or List of access rules which should be - added. access_rules already contains these rules. Not used by this - driver. + added. access_rules already contains these rules. Not used by + this driver. :param delete_rules: Empty List or List of access rules which should be - removed. access_rules doesn't contain these rules. Not used by - this driver. + removed. access_rules doesn't contain these rules. Not used by + this driver. + :param update_rules: Empty List or List of access rules which should be + updated. access_rules already contains these rules. :param share_server: Data structure with share server information. Not used by this driver. 
""" diff --git a/manila/share/drivers/purestorage/flashblade.py b/manila/share/drivers/purestorage/flashblade.py index 138059c9a4..f22f9df7c5 100644 --- a/manila/share/drivers/purestorage/flashblade.py +++ b/manila/share/drivers/purestorage/flashblade.py @@ -467,6 +467,7 @@ def update_access( access_rules, add_rules, delete_rules, + update_rules, share_server=None, ): """Update access of share""" diff --git a/manila/share/drivers/qnap/qnap.py b/manila/share/drivers/qnap/qnap.py index 20c8626613..b96c0181b2 100644 --- a/manila/share/drivers/qnap/qnap.py +++ b/manila/share/drivers/qnap/qnap.py @@ -627,7 +627,7 @@ def _get_vol_host(self, host_list, vol_name_timestamp): @utils.synchronized('qnap-update_access') def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): if not (add_rules or delete_rules): volName = self.private_storage.get(share['id'], 'volName') LOG.debug('volName: %s', volName) diff --git a/manila/share/drivers/quobyte/quobyte.py b/manila/share/drivers/quobyte/quobyte.py index e2c2ec071a..461a4bc883 100644 --- a/manila/share/drivers/quobyte/quobyte.py +++ b/manila/share/drivers/quobyte/quobyte.py @@ -365,7 +365,7 @@ def shrink_share(self, shrink_share, shrink_size, share_server=None): self._resize_share(share=shrink_share, new_size=shrink_size) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share. Two different cases are supported in here: @@ -385,6 +385,8 @@ def update_access(self, context, share, access_rules, add_rules, added. access_rules already contains these rules. :param delete_rules: Empty List or List of access rules which should be removed. access_rules doesn't contain these rules. + :param update_rules: Empty List or List of access rules which should be + updated. access_rules already contains these rules. 
:param share_server: None or Share server model :raises If all of the *_rules params are None the method raises an InvalidShareAccess exception diff --git a/manila/share/drivers/service_instance.py b/manila/share/drivers/service_instance.py index a28a5121eb..b873a6b844 100644 --- a/manila/share/drivers/service_instance.py +++ b/manila/share/drivers/service_instance.py @@ -856,7 +856,7 @@ def teardown_network(self, server_details): LOG.debug("Failed to delete port %(port_id)s with error: " "\n %(exc)s", {"port_id": port_id, "exc": e}) - if router_id and subnet_id: + if subnet_id: ports = self.neutron_api.list_ports( fields=['device_id', 'device_owner'], fixed_ips=['subnet_id=%s' % subnet_id]) @@ -873,17 +873,18 @@ def teardown_network(self, server_details): # exist that use this subnet. So, do not remove it # from router. return - try: - # NOTE(vponomaryov): there is no other share servers or - # some VMs that use this subnet. So, remove it from router. - self.neutron_api.router_remove_interface( - router_id, subnet_id) - except exception.NetworkException as e: - if e.kwargs['code'] != 404: - raise - LOG.debug('Subnet %(subnet_id)s is not attached to the ' - 'router %(router_id)s.', - {'subnet_id': subnet_id, 'router_id': router_id}) + if router_id: + try: + # NOTE(vponomaryov): there is no other share servers or + # some VMs that use this subnet. So, remove it from router. 
+ self.neutron_api.router_remove_interface( + router_id, subnet_id) + except exception.NetworkException as e: + if e.kwargs['code'] != 404: + raise + LOG.debug('Subnet %(subnet_id)s is not attached to the ' + 'router %(router_id)s.', + {'subnet_id': subnet_id, 'router_id': router_id}) self.neutron_api.update_subnet(subnet_id, '') @utils.synchronized( diff --git a/manila/share/drivers/tegile/tegile.py b/manila/share/drivers/tegile/tegile.py index 52902780b2..9c3ce07108 100644 --- a/manila/share/drivers/tegile/tegile.py +++ b/manila/share/drivers/tegile/tegile.py @@ -402,7 +402,7 @@ def _check_share_access(self, share_proto, access_type): @debugger def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): if not (add_rules or delete_rules): # Recovery mode pool, project, share_name = ( diff --git a/manila/share/drivers/veritas/veritas_isa.py b/manila/share/drivers/veritas/veritas_isa.py index 8483235dff..8041b3cbb5 100644 --- a/manila/share/drivers/veritas/veritas_isa.py +++ b/manila/share/drivers/veritas/veritas_isa.py @@ -421,7 +421,7 @@ def _deny_access(self, context, share, access, share_server=None): json.dumps(data2), 'DELETE') def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access to the share.""" if (add_rules or delete_rules): diff --git a/manila/share/drivers/windows/windows_smb_helper.py b/manila/share/drivers/windows/windows_smb_helper.py index b96830f2fe..be2d47c12d 100644 --- a/manila/share/drivers/windows/windows_smb_helper.py +++ b/manila/share/drivers/windows/windows_smb_helper.py @@ -179,7 +179,7 @@ def _revoke_share_access(self, server, share_name, access_to): 'share_name': share_name}) def update_access(self, server, share_name, access_rules, add_rules, - delete_rules): + delete_rules, update_rules): self.validate_access_rules( access_rules + 
add_rules, self._SUPPORTED_ACCESS_TYPES, diff --git a/manila/share/drivers/zadara/zadara.py b/manila/share/drivers/zadara/zadara.py index 37ab6015b9..66c62bcffb 100644 --- a/manila/share/drivers/zadara/zadara.py +++ b/manila/share/drivers/zadara/zadara.py @@ -384,7 +384,7 @@ def _deny_access(self, context, share, access, share_server=None): vpsa_srv=vpsa_srv) def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): access_updates = {} if not (add_rules or delete_rules): # add_rules and delete_rules can be empty lists, in cases diff --git a/manila/share/drivers/zfsonlinux/driver.py b/manila/share/drivers/zfsonlinux/driver.py index 799f70ace9..5bcef88317 100644 --- a/manila/share/drivers/zfsonlinux/driver.py +++ b/manila/share/drivers/zfsonlinux/driver.py @@ -704,7 +704,7 @@ def shrink_share(self, share, new_size, share_server=None): @ensure_share_server_not_provided def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Updates access rules for given share.""" dataset_name = self._get_dataset_name(share) executor = self._get_shell_executor_by_host(share['host']) diff --git a/manila/share/manager.py b/manila/share/manager.py index d0aff1ddc8..8c05448f0b 100644 --- a/manila/share/manager.py +++ b/manila/share/manager.py @@ -264,7 +264,7 @@ def wrapped(self, *args, **kwargs): class ShareManager(manager.SchedulerDependentManager): """Manages NAS storages.""" - RPC_API_VERSION = '1.27' + RPC_API_VERSION = '1.30' def __init__(self, share_driver=None, service_name=None, *args, **kwargs): """Load the driver from args, or from flags.""" @@ -401,7 +401,8 @@ def is_service_ready(self): """ return self.driver.initialized - def ensure_driver_resources(self, ctxt): + def ensure_driver_resources(self, ctxt, skip_backend_info_check=False): + update_instances_status = 
CONF.update_shares_status_on_ensure old_backend_info = self.db.backend_info_get(ctxt, self.host) old_backend_info_hash = (old_backend_info.get('info_hash') if old_backend_info is not None else None) @@ -409,31 +410,33 @@ def ensure_driver_resources(self, ctxt): new_backend_info_hash = None backend_info_implemented = True update_share_instances = [] - try: - new_backend_info = self.driver.get_backend_info(ctxt) - except Exception as e: - if not isinstance(e, NotImplementedError): - LOG.exception( - "The backend %(host)s could not get backend info.", - {'host': self.host}) - raise - else: - backend_info_implemented = False + if not skip_backend_info_check: + try: + new_backend_info = self.driver.get_backend_info(ctxt) + except Exception as e: + if not isinstance(e, NotImplementedError): + LOG.exception( + "The backend %(host)s could not get backend info.", + {'host': self.host}) + raise + else: + backend_info_implemented = False + LOG.debug( + ("The backend %(host)s does not support get backend" + " info method."), + {'host': self.host}) + + if new_backend_info: + new_backend_info_hash = hashlib.sha1( + str(sorted(new_backend_info.items())).encode( + 'utf-8')).hexdigest() + if ((old_backend_info_hash == new_backend_info_hash and + backend_info_implemented) and not skip_backend_info_check): LOG.debug( - ("The backend %(host)s does not support get backend" - " info method."), + ("Ensure shares is being skipped because the %(host)s's " + "old backend info is the same as its new backend info."), {'host': self.host}) - - if new_backend_info: - new_backend_info_hash = hashlib.sha1(str( - sorted(new_backend_info.items())).encode('utf-8')).hexdigest() - if (old_backend_info_hash == new_backend_info_hash and - backend_info_implemented): - LOG.debug( - ("Ensure shares is being skipped because the %(host)s's old " - "backend info is the same as its new backend info."), - {'host': self.host}) - return + return share_instances = self.db.share_instance_get_all_by_host( ctxt, 
self.host) @@ -467,7 +470,19 @@ def ensure_driver_resources(self, ctxt): ctxt, share_instance) update_share_instances.append(share_instance_dict) + do_service_status_update = False if update_share_instances: + # No reason to update the shares status if nothing will be done. + do_service_status_update = True + service = self.db.service_get_by_args( + ctxt, self.host, 'manila-share') + self.db.service_update(ctxt, service['id'], {'ensuring': True}) + if update_instances_status: + for instance in update_share_instances: + self.db.share_instance_update( + ctxt, instance['id'], + {'status': constants.STATUS_ENSURING} + ) try: update_share_instances = self.driver.ensure_shares( ctxt, update_share_instances) or {} @@ -494,10 +509,11 @@ def ensure_driver_resources(self, ctxt): share_instance_update_dict = ( update_share_instances[share_instance['id']] ) - if share_instance_update_dict.get('status'): + backend_provided_status = share_instance_update_dict.get('status') + if backend_provided_status: self.db.share_instance_update( ctxt, share_instance['id'], - {'status': share_instance_update_dict.get('status'), + {'status': backend_provided_status, 'host': share_instance['host']} ) metadata_updates = share_instance_update_dict.get('metadata') @@ -568,6 +584,13 @@ def ensure_driver_resources(self, ctxt): "Unexpected error occurred while updating " "access rules for snapshot instance %s.", snap_instance['id']) + if not backend_provided_status and update_instances_status: + self.db.share_instance_update( + ctxt, share_instance['id'], + {'status': constants.STATUS_AVAILABLE} + ) + if do_service_status_update: + self.db.service_update(ctxt, service['id'], {'ensuring': False}) def _ensure_share(self, ctxt, share_instance): export_locations = None @@ -4368,7 +4391,7 @@ def _update_replica_snapshot(self, context, replica_snapshot, @add_hooks @utils.require_driver_initialized def update_access(self, context, share_instance_id): - """Allow/Deny access to some share.""" + 
"""Allow/Deny/Update access to some share.""" share_instance = self._get_share_instance(context, share_instance_id) share_server_id = share_instance.get('share_server_id') @@ -6727,8 +6750,9 @@ def update_share_server_network_allocations( current_subnets = [subnet for subnet in current_subnets if subnet['id'] != new_share_network_subnet_id] share_servers = ( - self.db.share_server_get_all_by_host_and_share_subnet( - context, self.host, new_share_network_subnet_id)) + self.db.share_server_get_all_by_host_and_or_share_subnet( + context, host=self.host, + share_subnet_id=new_share_network_subnet_id)) for share_server in share_servers: share_server_id = share_server['id'] @@ -6781,3 +6805,63 @@ def update_share_server_network_allocations( # order to properly update share network status. self._check_share_network_update_finished( context, share_network_id=share_network['id']) + + def update_share_from_metadata(self, context, share_id, metadata): + share = self.db.share_get(context, share_id) + share_instance = self._get_share_instance(context, share) + share_server = self._get_share_server(context, share_instance) + try: + self.driver.update_share_from_metadata(context, share_instance, + metadata, share_server) + self.message_api.create( + context, + message_field.Action.UPDATE_METADATA, + share['project_id'], + resource_type=message_field.Resource.SHARE, + resource_id=share_id, + detail=message_field.Detail.UPDATE_METADATA_SUCCESS) + except Exception: + self.message_api.create( + context, + message_field.Action.UPDATE_METADATA, + share['project_id'], + resource_type=message_field.Resource.SHARE, + resource_id=share_id, + detail=message_field.Detail.UPDATE_METADATA_FAILURE) + + def update_share_network_subnet_from_metadata(self, context, + share_network_id, + share_network_subnet_id, + share_server_id, + metadata): + share_network = self.db.share_network_get(context, share_network_id) + share_network_subnet = self.db.share_network_subnet_get( + context, 
share_network_subnet_id) + share_server = self.db.share_server_get(context, share_server_id) + + try: + self.driver.update_share_network_subnet_from_metadata( + context, + share_network, + share_network_subnet, + share_server, + metadata) + self.message_api.create( + context, + message_field.Action.UPDATE_METADATA, + share_network['project_id'], + resource_type=message_field.Resource.SHARE_NETWORK_SUBNET, + resource_id=share_network_subnet_id, + detail=message_field.Detail.UPDATE_METADATA_SUCCESS) + except Exception as e: + if isinstance(e, NotImplementedError): + LOG.debug("Not passing the updates of share network subnet " + "metadata to share driver since the required driver " + "interface is not implemented.") + self.message_api.create( + context, + message_field.Action.UPDATE_METADATA, + share_network['project_id'], + resource_type=message_field.Resource.SHARE_NETWORK_SUBNET, + resource_id=share_network_subnet_id, + detail=message_field.Detail.UPDATE_METADATA_FAILURE) diff --git a/manila/share/rpcapi.py b/manila/share/rpcapi.py index 93a02b46c3..ca59d827dd 100644 --- a/manila/share/rpcapi.py +++ b/manila/share/rpcapi.py @@ -88,6 +88,9 @@ class ShareAPI(object): 1.26 - Add create_backup() and delete_backup() restore_backup() methods 1.27 - Update delete_share_instance() and delete_snapshot() methods + 1.28 - Add update_share_from_metadata() method + 1.29 - Add ensure_shares() + 1.30 - Add update_share_network_subnet_from_metadata() method """ BASE_RPC_API_VERSION = '1.0' @@ -96,7 +99,7 @@ def __init__(self, topic=None): super(ShareAPI, self).__init__() target = messaging.Target(topic=CONF.share_topic, version=self.BASE_RPC_API_VERSION) - self.client = rpc.get_client(target, version_cap='1.27') + self.client = rpc.get_client(target, version_cap='1.30') def create_share_instance(self, context, share_instance, host, request_spec, filter_properties, @@ -531,3 +534,35 @@ def restore_backup(self, context, backup, share_id): 'restore_backup', backup=backup, 
share_id=share_id) + + def update_share_from_metadata(self, context, share, metadata): + host = utils.extract_host(share['instance']['host']) + call_context = self.client.prepare(server=host, version='1.28') + return call_context.cast(context, + 'update_share_from_metadata', + share_id=share['id'], + metadata=metadata) + + def update_share_network_subnet_from_metadata(self, context, + share_network_id, + share_network_subnet_id, + share_server, + metadata): + host = utils.extract_host(share_server['host']) + call_context = self.client.prepare(server=host, version='1.30') + call_context.cast( + context, + 'update_share_network_subnet_from_metadata', + share_network_id=share_network_id, + share_network_subnet_id=share_network_subnet_id, + share_server_id=share_server['id'], + metadata=metadata) + + def ensure_driver_resources(self, context, host): + host = utils.extract_host(host) + call_context = self.client.prepare(server=host, version='1.29') + return call_context.cast( + context, + 'ensure_driver_resources', + skip_backend_info_check=True + ) diff --git a/manila/tests/api/v1/test_shares.py b/manila/tests/api/v1/test_shares.py index aefdcf2bdb..89bc10daaa 100644 --- a/manila/tests/api/v1/test_shares.py +++ b/manila/tests/api/v1/test_shares.py @@ -1360,6 +1360,7 @@ def _stub_deny_access(*args, **kwargs): self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) + self.mock_object(self.controller, '_check_for_access_rule_locks') id = 'fake_share_id' body = {"os-deny_access": {"access_id": 'fake_acces_id'}} @@ -1377,14 +1378,20 @@ def test_deny_access_with_share_network_id(self): share_network = db_utils.create_share_network() share = db_utils.create_share(share_network_id=share_network['id']) self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) + self.mock_object(self.controller, '_check_for_access_rule_locks') id = 'fake_share_id' - body = {"os-deny_access": {"access_id": 
'fake_acces_id'}} + access_data = {"access_id": 'fake_acces_id'} + body = {"os-deny_access": access_data} req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id) res = self.controller._deny_access(req, id, body) self.assertEqual(202, res.status_int) + self.controller._check_for_access_rule_locks.assert_called_once_with( + req.environ['manila.context'], access_data, + access_data['access_id'], id + ) self.mock_policy_check.assert_called_once_with( req.environ['manila.context'], 'share', 'deny_access') @@ -1394,6 +1401,7 @@ def _stub_deny_access(*args, **kwargs): self.mock_object(share_api.API, "deny_access", _stub_deny_access) self.mock_object(share_api.API, "access_get", _fake_access_get) + self.mock_object(self.controller, '_check_for_access_rule_locks') id = 'super_fake_share_id' body = {"os-deny_access": {"access_id": 'fake_acces_id'}} @@ -1441,12 +1449,14 @@ def test__check_for_access_rule_locks_no_locks(self): access_id = 'fake_access_id' share_id = 'fake_share_id' + self.mock_object(context, 'elevated', mock.Mock(return_value=context)) self.controller._check_for_access_rule_locks( context, {}, access_id, share_id) delete_search_opts = { 'resource_id': access_id, - 'resource_action': constants.RESOURCE_ACTION_DELETE + 'resource_action': constants.RESOURCE_ACTION_DELETE, + 'all_projects': True, } resource_locks.API.get_all.assert_called_once_with( @@ -1465,6 +1475,7 @@ def test__check_for_access_rules_locks_too_many_locks(self): access_id = 'fake_access_id' share_id = 'fake_share_id' + self.mock_object(context, 'elevated', mock.Mock(return_value=context)) self.assertRaises( webob.exc.HTTPForbidden, self.controller._check_for_access_rule_locks, @@ -1472,7 +1483,8 @@ def test__check_for_access_rules_locks_too_many_locks(self): delete_search_opts = { 'resource_id': access_id, - 'resource_action': constants.RESOURCE_ACTION_DELETE + 'resource_action': constants.RESOURCE_ACTION_DELETE, + 'all_projects': True, } 
resource_locks.API.get_all.assert_called_once_with( @@ -1497,6 +1509,7 @@ def test__check_for_access_rules_cant_manipulate_lock(self): access_id = 'fake_access_id' share_id = 'fake_share_id' + self.mock_object(context, 'elevated', mock.Mock(return_value=context)) self.assertRaises( webob.exc.HTTPForbidden, self.controller._check_for_access_rule_locks, @@ -1504,7 +1517,8 @@ def test__check_for_access_rules_cant_manipulate_lock(self): delete_search_opts = { 'resource_id': access_id, - 'resource_action': constants.RESOURCE_ACTION_DELETE + 'resource_action': constants.RESOURCE_ACTION_DELETE, + 'all_projects': True, } resource_locks.API.get_all.assert_called_once_with( @@ -1535,6 +1549,7 @@ def test__check_for_access_rules_locks_unauthorized(self): access_id = 'fake_access_id' share_id = 'fake_share_id' + self.mock_object(context, 'elevated', mock.Mock(return_value=context)) self.assertRaises( webob.exc.HTTPForbidden, self.controller._check_for_access_rule_locks, @@ -1542,7 +1557,8 @@ def test__check_for_access_rules_locks_unauthorized(self): ) delete_search_opts = { 'resource_id': access_id, - 'resource_action': constants.RESOURCE_ACTION_DELETE + 'resource_action': constants.RESOURCE_ACTION_DELETE, + 'all_projects': True, } resource_locks.API.get_all.assert_called_once_with( context, search_opts=delete_search_opts, show_count=True @@ -1569,15 +1585,19 @@ def test_check_for_access_rules_locks(self): access_id = 'fake_access_id' share_id = 'fake_share_id' + self.mock_object(context, 'elevated', mock.Mock(return_value=context)) self.controller._check_for_access_rule_locks( context, {'unrestrict': True}, access_id, share_id) delete_search_opts = { 'resource_id': access_id, - 'resource_action': constants.RESOURCE_ACTION_DELETE + 'resource_action': constants.RESOURCE_ACTION_DELETE, + 'all_projects': True, } resource_locks.API.get_all.assert_called_once_with( - context, search_opts=delete_search_opts, show_count=True) + context.elevated(), search_opts=delete_search_opts, + 
show_count=True + ) (resource_locks.API.ensure_context_can_delete_lock .assert_called_once_with( context, locks[0]['id'])) diff --git a/manila/tests/api/v2/test_services.py b/manila/tests/api/v2/test_services.py index 97141d637c..f1e03ac0ca 100644 --- a/manila/tests/api/v2/test_services.py +++ b/manila/tests/api/v2/test_services.py @@ -17,6 +17,7 @@ import datetime from unittest import mock +import webob import ddt from oslo_utils import timeutils @@ -158,6 +159,7 @@ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), }, ]} +ENSURE_SHARES_VERSION = "2.86" def fake_service_get_all(context): @@ -386,3 +388,99 @@ def test_services_list_api_not_found(self, url, version, controller): self.assertRaises( exception.VersionNotFoundForAPIMethod, controller().index, req) + + def test_ensure_shares_no_host_param(self): + req = fakes.HTTPRequest.blank( + '/fooproject/services/ensure', version=ENSURE_SHARES_VERSION) + body = {} + + self.assertRaises( + webob.exc.HTTPBadRequest, + self.controller.ensure_shares, + req, + body + ) + + def test_ensure_shares_host_not_found(self): + req = fakes.HTTPRequest.blank( + '/fooproject/services/ensure', version=ENSURE_SHARES_VERSION) + req_context = req.environ['manila.context'] + body = {'host': 'host1'} + + mock_service_get = self.mock_object( + db, 'service_get_by_args', + mock.Mock(side_effect=exception.NotFound()) + ) + + self.assertRaises( + webob.exc.HTTPNotFound, + self.controller.ensure_shares, + req, + body + ) + mock_service_get.assert_called_once_with( + req_context, + body['host'], + 'manila-share' + ) + + def test_ensure_shares_conflict(self): + req = fakes.HTTPRequest.blank( + '/fooproject/services/ensure', version=ENSURE_SHARES_VERSION) + req_context = req.environ['manila.context'] + body = {'host': 'host1'} + fake_service = {'id': 'fake_service_id'} + + mock_service_get = self.mock_object( + db, + 'service_get_by_args', + mock.Mock(return_value=fake_service) + ) + mock_ensure = self.mock_object( + 
self.controller.service_api, + 'ensure_shares', + mock.Mock(side_effect=webob.exc.HTTPConflict) + ) + + self.assertRaises( + webob.exc.HTTPConflict, + self.controller.ensure_shares, + req, + body + ) + mock_service_get.assert_called_once_with( + req_context, + body['host'], + 'manila-share' + ) + mock_ensure.assert_called_once_with( + req_context, fake_service, body['host'] + ) + + def test_ensure_shares(self): + req = fakes.HTTPRequest.blank( + '/fooproject/services/ensure', version=ENSURE_SHARES_VERSION) + req_context = req.environ['manila.context'] + body = {'host': 'host1'} + fake_service = {'id': 'fake_service_id'} + + mock_service_get = self.mock_object( + db, + 'service_get_by_args', + mock.Mock(return_value=fake_service) + ) + mock_ensure = self.mock_object( + self.controller.service_api, 'ensure_shares', + ) + + response = self.controller.ensure_shares(req, body) + + self.assertEqual(202, response.status_int) + mock_service_get.assert_called_once_with( + req_context, + body['host'], + 'manila-share' + ) + mock_ensure.assert_called_once_with( + req_context, fake_service, body['host'] + ) diff --git a/manila/tests/api/v2/test_share_accesses.py b/manila/tests/api/v2/test_share_accesses.py index c216145b1c..d02b036784 100644 --- a/manila/tests/api/v2/test_share_accesses.py +++ b/manila/tests/api/v2/test_share_accesses.py @@ -15,6 +15,7 @@ from unittest import mock +import copy import ddt from webob import exc @@ -237,3 +238,39 @@ def test_show_with_unsupported_version(self, version): self.controller.show, self._get_show_request(version=version), self.access['id']) + + def _get_update_request(self, access_id=None): + access_id = access_id or self.access['id'] + req = fakes.HTTPRequest.blank( + '/v2/share-access-rules/%s' % access_id, version="2.88", + experimental=True) + return req + + def test_update_access_level(self): + update_share_access = copy.deepcopy(self.access) + update_share_access.update({'access_level': 'ro'}) + self.mock_object( + 
self.controller.share_api, 'update_access', + mock.Mock(return_value=update_share_access)) + + body = {'update_access': {'access_level': 'ro'}} + url = self._get_update_request() + ret = self.controller.update(url, self.access['id'], body=body) + self.assertEqual(update_share_access['access_level'], + ret['access']['access_level']) + + def test_update_access_level_invalid_access_level(self): + body = {'access': {'access_level': 'fake_access'}} + self.assertRaises( + exc.HTTPBadRequest, + self.controller.update, + self._get_update_request(), self.access['id'], + body=body) + + def test_update_access_level_invalid_update_request(self): + body = {'access': {'access_key': 'xxxx'}} + self.assertRaises( + exc.HTTPBadRequest, + self.controller.update, + self._get_update_request(), self.access['id'], + body=body) diff --git a/manila/tests/api/v2/test_share_export_locations.py b/manila/tests/api/v2/test_share_export_locations.py index e2a3df0682..67111abcd6 100644 --- a/manila/tests/api/v2/test_share_export_locations.py +++ b/manila/tests/api/v2/test_share_export_locations.py @@ -249,3 +249,98 @@ def test_show_with_unsupported_version(self, version): self.share['id'], index_result['export_locations'][0]['id'] ) + + def test_validate_metadata_for_update(self): + index_result = self.controller.index(self.req, self.share['id']) + el_id = index_result['export_locations'][0]['id'] + metadata = {"foo": "bar", "preferred": "False"} + + req = fakes.HTTPRequest.blank( + '/v2/shares/%s/export_locations/%s/metadata' % ( + self.share_instance_id, el_id), + version="2.87", use_admin_context=True) + result = self.controller._validate_metadata_for_update( + req, el_id, metadata) + + self.assertEqual(metadata, result) + + def test_validate_metadata_for_update_invalid(self): + index_result = self.controller.index(self.req, self.share['id']) + el_id = index_result['export_locations'][0]['id'] + metadata = {"foo": "bar", "preferred": "False"} + + self.mock_policy_check = self.mock_object( + 
policy, 'check_policy', mock.Mock( + side_effect=exception.PolicyNotAuthorized( + action="update_admin_only_metadata"))) + + req = fakes.HTTPRequest.blank( + '/v2/shares/%s/export_locations/%s/metadata' % ( + self.share_instance_id, el_id), + version="2.87", use_admin_context=False) + + self.assertRaises(exc.HTTPForbidden, + self.controller._validate_metadata_for_update, + req, el_id, metadata) + self.mock_policy_check.assert_called_once_with( + req.environ['manila.context'], 'share_export_location', + 'update_admin_only_metadata') + + def test_create_metadata(self): + index_result = self.controller.index(self.req, self.share['id']) + el_id = index_result['export_locations'][0]['id'] + body = {'metadata': {'key1': 'val1', 'key2': 'val2'}} + mock_validate = self.mock_object( + self.controller, '_validate_metadata_for_update', + mock.Mock(return_value=body['metadata'])) + mock_create = self.mock_object( + self.controller, '_create_metadata', + mock.Mock(return_value=body)) + + req = fakes.HTTPRequest.blank( + '/v2/shares/%s/export_locations/%s/metadata' % ( + self.share_instance_id, el_id), + version="2.87", use_admin_context=True) + + res = self.controller.create_metadata(req, self.share['id'], el_id, + body) + self.assertEqual(body, res) + mock_validate.assert_called_once_with(req, el_id, body['metadata'], + delete=False) + mock_create.assert_called_once_with(req, el_id, body) + + def test_update_all_metadata(self): + index_result = self.controller.index(self.req, self.share['id']) + el_id = index_result['export_locations'][0]['id'] + body = {'metadata': {'key1': 'val1', 'key2': 'val2'}} + mock_validate = self.mock_object( + self.controller, '_validate_metadata_for_update', + mock.Mock(return_value=body['metadata'])) + mock_update = self.mock_object( + self.controller, '_update_all_metadata', + mock.Mock(return_value=body)) + + req = fakes.HTTPRequest.blank( + '/v2/shares/%s/export_locations/%s/metadata' % ( + self.share_instance_id, el_id), + version="2.87", 
use_admin_context=True) + + res = self.controller.update_all_metadata(req, self.share['id'], el_id, + body) + self.assertEqual(body, res) + mock_validate.assert_called_once_with(req, el_id, body['metadata']) + mock_update.assert_called_once_with(req, el_id, body) + + def test_delete_metadata(self): + index_result = self.controller.index(self.req, self.share['id']) + el_id = index_result['export_locations'][0]['id'] + mock_delete = self.mock_object( + self.controller, '_delete_metadata', mock.Mock()) + + req = fakes.HTTPRequest.blank( + '/v2/shares/%s/export_locations/%s/metadata/fake_key' % ( + self.share_instance_id, el_id), + version="2.87", use_admin_context=True) + self.controller.delete_metadata(req, self.share['id'], el_id, + 'fake_key') + mock_delete.assert_called_once_with(req, el_id, 'fake_key') diff --git a/manila/tests/api/v2/test_share_network_subnets.py b/manila/tests/api/v2/test_share_network_subnets.py index 94bb83f2e4..3bb3a0bbac 100644 --- a/manila/tests/api/v2/test_share_network_subnets.py +++ b/manila/tests/api/v2/test_share_network_subnets.py @@ -17,6 +17,7 @@ from unittest import mock import ddt +from oslo_config import cfg from oslo_db import exception as db_exception from manila.api import common @@ -30,6 +31,9 @@ from manila.tests import db_utils from webob import exc +CONF = cfg.CONF + + fake_az = { 'id': 'ae525e12-07e8-4ddc-a2fd-4a89ad4a65ff', 'name': 'fake_az_name' @@ -505,48 +509,87 @@ def test_index_metadata(self): mock_index.assert_called_once_with(req, self.subnet['id'], parent_id=self.share_network['id']) - def test_create_metadata(self): - req = fakes.HTTPRequest.blank('/subnets/', version="2.78") + @ddt.data("2.78", "2.89") + def test_create_metadata(self, version): + req = fakes.HTTPRequest.blank('/subnets/', version=version) + context = req.environ['manila.context'] mock_index = self.mock_object( self.controller, '_create_metadata', - mock.Mock(return_value='fake_metadata')) + mock.Mock(return_value={'metadata': 
'fake_metadata'})) + mock_update = self.mock_object( + self.controller.share_api, + 'update_share_network_subnet_from_metadata') body = 'fake_metadata_body' result = self.controller.create_metadata(req, self.share_network['id'], self.subnet['id'], body) - self.assertEqual('fake_metadata', result) + self.assertEqual('fake_metadata', result['metadata']) mock_index.assert_called_once_with(req, self.subnet['id'], body, parent_id=self.share_network['id']) + metadata_support = (req.api_version_request >= + api_version.APIVersionRequest("2.89")) + if metadata_support: + mock_update.assert_called_once_with( + context, self.share_network['id'], + self.subnet['id'], 'fake_metadata') + else: + mock_update.assert_not_called() - def test_update_all_metadata(self): - req = fakes.HTTPRequest.blank('/subnets/', version="2.78") + @ddt.data("2.78", "2.89") + def test_update_all_metadata(self, version): + req = fakes.HTTPRequest.blank('/subnets/', version=version) + context = req.environ['manila.context'] mock_index = self.mock_object( self.controller, '_update_all_metadata', - mock.Mock(return_value='fake_metadata')) + mock.Mock(return_value={'metadata': 'fake_metadata'})) + mock_update = self.mock_object( + self.controller.share_api, + 'update_share_network_subnet_from_metadata') body = 'fake_metadata_body' result = self.controller.update_all_metadata( req, self.share_network['id'], self.subnet['id'], body) - self.assertEqual('fake_metadata', result) + self.assertEqual('fake_metadata', result['metadata']) mock_index.assert_called_once_with(req, self.subnet['id'], body, parent_id=self.share_network['id']) + metadata_support = (req.api_version_request >= + api_version.APIVersionRequest("2.89")) + if metadata_support: + mock_update.assert_called_once_with( + context, self.share_network['id'], + self.subnet['id'], 'fake_metadata') + else: + mock_update.assert_not_called() - def test_update_metadata_item(self): - req = fakes.HTTPRequest.blank('/subnets/', version="2.78") + 
@ddt.data("2.78", "2.89") + def test_update_metadata_item(self, version): + req = fakes.HTTPRequest.blank('/subnets/', version=version) + context = req.environ['manila.context'] mock_index = self.mock_object( self.controller, '_update_metadata_item', - mock.Mock(return_value='fake_metadata')) + mock.Mock(return_value={'metadata': 'fake_metadata'})) + mock_update = self.mock_object( + self.controller.share_api, + 'update_share_network_subnet_from_metadata') body = 'fake_metadata_body' key = 'fake_key' result = self.controller.update_metadata_item( req, self.share_network['id'], self.subnet['id'], body, key) - self.assertEqual('fake_metadata', result) + self.assertEqual('fake_metadata', result['metadata']) mock_index.assert_called_once_with(req, self.subnet['id'], body, key, parent_id=self.share_network['id']) + metadata_support = (req.api_version_request >= + api_version.APIVersionRequest("2.89")) + if metadata_support: + mock_update.assert_called_once_with( + context, self.share_network['id'], + self.subnet['id'], 'fake_metadata') + else: + mock_update.assert_not_called() def test_show_metadata(self): req = fakes.HTTPRequest.blank('/subnets/', version="2.78") @@ -562,16 +605,32 @@ def test_show_metadata(self): mock_index.assert_called_once_with(req, self.subnet['id'], key, parent_id=self.share_network['id']) - def test_delete_metadata(self): - req = fakes.HTTPRequest.blank('/subnets/', version="2.78") + @ddt.data("2.78", "2.89") + def test_delete_metadata(self, version): + req = fakes.HTTPRequest.blank('/subnets/', version=version) + context = req.environ['manila.context'] mock_index = self.mock_object( self.controller, '_delete_metadata', mock.Mock(return_value='fake_metadata')) + mock_sn_get = self.mock_object( + db_api, 'share_network_get', mock.Mock( + return_value=self.share_network)) key = 'fake_key' + CONF.set_default( + "driver_updatable_subnet_metadata", ['fake_key', 'fake_key2']) + result = self.controller.delete_metadata( req, self.share_network['id'], 
self.subnet['id'], key) self.assertEqual('fake_metadata', result) mock_index.assert_called_once_with(req, self.subnet['id'], key, parent_id=self.share_network['id']) + + metadata_support = (req.api_version_request >= + api_version.APIVersionRequest("2.89")) + if metadata_support: + mock_sn_get.assert_called_once_with( + context, self.share_network['id']) + else: + mock_sn_get.assert_not_called() diff --git a/manila/tests/api/v2/test_shares.py b/manila/tests/api/v2/test_shares.py index f13ce4e456..38265bbb9b 100644 --- a/manila/tests/api/v2/test_shares.py +++ b/manila/tests/api/v2/test_shares.py @@ -2251,6 +2251,53 @@ def test_remove_invalid_options_admin(self): common.remove_invalid_options(ctx, search_opts, allowed_opts) self.assertEqual(expected_opts, search_opts) + def test_create_metadata(self): + id = 'fake_share_id' + body = {'metadata': {'key1': 'val1', 'key2': 'val2'}} + mock_validate = self.mock_object( + self.controller, '_validate_metadata_for_update', + mock.Mock(return_value=body['metadata'])) + mock_create = self.mock_object( + self.controller, '_create_metadata', + mock.Mock(return_value=body)) + self.mock_object(share_api.API, 'update_share_from_metadata') + + req = fakes.HTTPRequest.blank( + '/v2/shares/%s/metadata' % id) + + res = self.controller.create_metadata(req, id, body) + self.assertEqual(body, res) + mock_validate.assert_called_once_with(req, id, body['metadata'], + delete=False) + mock_create.assert_called_once_with(req, id, body) + + def test_update_all_metadata(self): + id = 'fake_share_id' + body = {'metadata': {'key1': 'val1', 'key2': 'val2'}} + mock_validate = self.mock_object( + self.controller, '_validate_metadata_for_update', + mock.Mock(return_value=body['metadata'])) + mock_update = self.mock_object( + self.controller, '_update_all_metadata', + mock.Mock(return_value=body)) + self.mock_object(share_api.API, 'update_share_from_metadata') + + req = fakes.HTTPRequest.blank( + '/v2/shares/%s/metadata' % id) + res = 
self.controller.update_all_metadata(req, id, body) + self.assertEqual(body, res) + mock_validate.assert_called_once_with(req, id, body['metadata']) + mock_update.assert_called_once_with(req, id, body) + + def test_delete_metadata(self): + mock_delete = self.mock_object( + self.controller, '_delete_metadata', mock.Mock()) + + req = fakes.HTTPRequest.blank( + '/v2/shares/%s/metadata/fake_key' % id) + self.controller.delete_metadata(req, id, 'fake_key') + mock_delete.assert_called_once_with(req, id, 'fake_key') + def _fake_access_get(self, ctxt, access_id): diff --git a/manila/tests/db/sqlalchemy/test_api.py b/manila/tests/db/sqlalchemy/test_api.py index a1d722ead9..437070dbc1 100644 --- a/manila/tests/db/sqlalchemy/test_api.py +++ b/manila/tests/db/sqlalchemy/test_api.py @@ -2499,7 +2499,6 @@ def test_export_location_metadata_update_delete(self): self.assertEqual({}, result) def test_export_location_metadata_update_get(self): - # Write metadata for target export location export_location_uuid = self._get_export_location_uuid_by_path( self.initial_locations[0]) @@ -2532,6 +2531,29 @@ def test_export_location_metadata_update_get(self): self.assertEqual(updated_metadata, result) + def test_export_location_metadata_get_item(self): + export_location_uuid = self._get_export_location_uuid_by_path( + self.initial_locations[0]) + metadata = {'foo_key': 'foo_value', 'bar_key': 'bar_value'} + db_api.export_location_metadata_update( + self.ctxt, export_location_uuid, metadata, False) + result = db_api.export_location_metadata_get_item( + self.ctxt, export_location_uuid, 'foo_key') + self.assertEqual( + {'foo_key': 'foo_value'}, result) + + def test_export_location_metadata_get_item_invalid(self): + export_location_uuid = self._get_export_location_uuid_by_path( + self.initial_locations[0]) + metadata = {'foo_key': 'foo_value', 'bar_key': 'bar_value'} + db_api.export_location_metadata_update( + self.ctxt, export_location_uuid, metadata, False) + 
self.assertRaises(exception.MetadataItemNotFound, + db_api.export_location_metadata_get_item, + self.ctxt, + export_location_uuid, + 'foo') + @ddt.data( ("k", "v"), ("k" * 256, "v"), @@ -3756,7 +3778,7 @@ def test_get_all_by_host_and_share_subnet(self): invalid = db_utils.create_share_server(**invalid) other = db_utils.create_share_server(**other) - servers = db_api.share_server_get_all_by_host_and_share_subnet( + servers = db_api.share_server_get_all_by_host_and_or_share_subnet( self.ctxt, host='host1', share_subnet_id='1') @@ -3770,7 +3792,7 @@ def test_get_all_by_host_and_share_subnet(self): def test_get_all_by_host_and_share_subnet_not_found(self): self.assertRaises( exception.ShareServerNotFound, - db_api.share_server_get_all_by_host_and_share_subnet, + db_api.share_server_get_all_by_host_and_or_share_subnet, self.ctxt, host='fake', share_subnet_id='fake' ) diff --git a/manila/tests/lock/test_api.py b/manila/tests/lock/test_api.py index e3a6c09131..18d6e6b1b3 100644 --- a/manila/tests/lock/test_api.py +++ b/manila/tests/lock/test_api.py @@ -517,3 +517,45 @@ def test_delete(self): utils.IsAMatcher(context.RequestContext), 'd767d3cd-1187-404a-a91f-8b172e0e768e' ) + + def test_ensure_context_can_delete_lock_policy_fails(self): + lock = {'id': 'd767d3cd-1187-404a-a91f-8b172e0e768e'} + self.mock_object( + self.lock_api.db, 'resource_lock_get', mock.Mock(return_value=lock) + ) + self.mock_object( + policy, + 'check_policy', + mock.Mock(side_effect=exception.PolicyNotAuthorized( + action="resource_lock:delete")), + ) + + self.assertRaises( + exception.NotAuthorized, + self.lock_api.ensure_context_can_delete_lock, + self.ctxt, + 'd767d3cd-1187-404a-a91f-8b172e0e768e') + + self.lock_api.db.resource_lock_get.assert_called_once_with( + self.ctxt, 'd767d3cd-1187-404a-a91f-8b172e0e768e' + ) + policy.check_policy.assert_called_once_with( + self.ctxt, 'resource_lock', 'delete', lock) + + def test_ensure_context_can_delete_lock(self): + lock = {'id': 
'd767d3cd-1187-404a-a91f-8b172e0e768e'} + self.mock_object( + self.lock_api.db, 'resource_lock_get', mock.Mock(return_value=lock) + ) + self.mock_object(policy, 'check_policy') + self.mock_object(self.lock_api, '_check_allow_lock_manipulation') + + self.lock_api.ensure_context_can_delete_lock( + self.ctxt, + 'd767d3cd-1187-404a-a91f-8b172e0e768e') + + self.lock_api.db.resource_lock_get.assert_called_once_with( + self.ctxt, 'd767d3cd-1187-404a-a91f-8b172e0e768e' + ) + policy.check_policy.assert_called_once_with( + self.ctxt, 'resource_lock', 'delete', lock) diff --git a/manila/tests/services/__init__.py b/manila/tests/services/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/manila/tests/services/test_api.py b/manila/tests/services/test_api.py new file mode 100644 index 0000000000..7ea489e03c --- /dev/null +++ b/manila/tests/services/test_api.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock +from webob import exc + +from manila import context +from manila.services import api as service_api +from manila import test + + +class ServicesApiTest(test.TestCase): + + def setUp(self): + super(ServicesApiTest, self).setUp() + self.context = context.get_admin_context() + self.share_rpcapi = mock.Mock() + self.share_rpcapi.ensure_shares = mock.Mock() + self.services_api = service_api.API() + self.mock_object( + self.services_api, 'share_rpcapi', self.share_rpcapi + ) + + def test_ensure_shares(self): + host = 'fake_host@fakebackend' + fake_service = { + 'id': 'fake_service_id', + 'state': 'up' + } + + self.services_api.ensure_shares(self.context, fake_service, host) + + self.share_rpcapi.ensure_driver_resources.assert_called_once_with( + self.context, host + ) + + def test_ensure_shares_host_down(self): + host = 'fake_host@fakebackend' + fake_service = { + 'id': 'fake_service_id', + 'state': 'down' + } + + self.assertRaises( + exc.HTTPConflict, + self.services_api.ensure_shares, + self.context, + fake_service, + host + ) + + self.share_rpcapi.ensure_shares.assert_not_called() diff --git a/manila/tests/share/drivers/cephfs/test_driver.py b/manila/tests/share/drivers/cephfs/test_driver.py index fbcdfdf2a1..2450148aea 100644 --- a/manila/tests/share/drivers/cephfs/test_driver.py +++ b/manila/tests/share/drivers/cephfs/test_driver.py @@ -226,14 +226,15 @@ def test_update_access(self): } add_rules = access_rules = [alice, ] delete_rules = [] + update_rules = [] self._driver.update_access( self._context, self._share, access_rules, add_rules, delete_rules, - None) + update_rules, None) self._driver.protocol_helper.update_access.assert_called_once_with( self._context, self._share, access_rules, add_rules, delete_rules, - share_server=None) + update_rules, share_server=None) def test_ensure_shares(self): self._driver.protocol_helper.reapply_rules_while_ensuring_shares = True @@ -868,7 +869,9 @@ def test_update_access_add_rm(self): 
self._share, access_rules=[alice, manila, admin, dabo], add_rules=[alice, manila, admin, dabo], - delete_rules=[bob]) + delete_rules=[bob], + update_rules=[], + ) expected_access_updates = { 'accessid1': {'access_key': 'abc123'}, @@ -936,7 +939,7 @@ def test_update_access_all(self): access_updates = self._native_protocol_helper.update_access( self._context, self._share, access_rules=[alice], add_rules=[], - delete_rules=[]) + delete_rules=[], update_rules=[]) self.assertEqual( {'accessid1': {'access_key': 'abc123'}}, access_updates) @@ -1377,7 +1380,6 @@ def test_allow_access_rw_ro(self, mode): "pseudo": "ganesha:/foo/bar", "squash": "none", "security_label": True, - "protocols": [4], "fsal": { "name": "CEPH", "fs_name": volname, diff --git a/manila/tests/share/drivers/container/test_driver.py b/manila/tests/share/drivers/container/test_driver.py index e0f732b892..5283c2b22a 100644 --- a/manila/tests/share/drivers/container/test_driver.py +++ b/manila/tests/share/drivers/container/test_driver.py @@ -267,7 +267,7 @@ def test_update_access_access_rules_ok(self): self._driver.update_access(self._context, self.share, [{'access_level': const.ACCESS_LEVEL_RW}], - [], [], {"id": "fake"}) + [], [], [], {"id": "fake"}) helper.update_access.assert_called_with('manila_fake', fake_share_name, [{'access_level': 'rw'}], diff --git a/manila/tests/share/drivers/dell_emc/plugins/unity/test_utils.py b/manila/tests/share/drivers/dell_emc/plugins/unity/test_utils.py index 709481300b..84fc723676 100644 --- a/manila/tests/share/drivers/dell_emc/plugins/unity/test_utils.py +++ b/manila/tests/share/drivers/dell_emc/plugins/unity/test_utils.py @@ -54,6 +54,24 @@ def parent_storage_processor(self): SPB_LA1 = MockPort(SPB, 'spb_la_1', 1500) +class MockPortV5(object): + def __init__(self, sp, port_id, mtu): + self._sp = sp + self.port_id = port_id + self.mtu = mtu + + def get_id(self): + return self.port_id + + @property + def storage_processor(self): + return self._sp + + +SPA_LA2 = 
MockPort(SPA, 'spa_la_2', 1500) +SPB_LA2 = MockPort(SPB, 'spb_la_2', 1500) + + @ddt.ddt class TestUtils(test.TestCase): @ddt.data({'matcher': None, @@ -96,6 +114,10 @@ def test_do_match(self, data): 'ids_conf': ['spa*'], 'port_map': {'spa': {'spa_eth0', 'spa_eth1'}}, 'unmanaged': {'spb_eth0'}}, + {'ports': [SPA_LA2, SPB_LA2], + 'ids_conf': None, + 'port_map': {'spa': {'spa_la_2'}, 'spb': {'spb_la_2'}}, + 'unmanaged': set()}, ) @ddt.unpack def test_match_ports(self, ports, ids_conf, port_map, unmanaged): diff --git a/manila/tests/share/drivers/dell_emc/test_driver.py b/manila/tests/share/drivers/dell_emc/test_driver.py index 9a79ab5dde..913b862b40 100644 --- a/manila/tests/share/drivers/dell_emc/test_driver.py +++ b/manila/tests/share/drivers/dell_emc/test_driver.py @@ -230,7 +230,8 @@ def test_support_manage(self): access = mock.Mock() self.driver.allow_access(context, share, access, share_server) self.driver.deny_access(context, share, access, share_server) - self.driver.update_access(context, share, None, None, share_server) + self.driver.update_access(context, share, None, None, + None, share_server) self.driver.check_for_setup_error() self.driver.get_network_allocations_number() self.driver._teardown_server(None) diff --git a/manila/tests/share/drivers/dummy.py b/manila/tests/share/drivers/dummy.py index afd451bf26..2033d973f6 100644 --- a/manila/tests/share/drivers/dummy.py +++ b/manila/tests/share/drivers/dummy.py @@ -89,6 +89,8 @@ "create_backup": "1.50", "restore_backup": "1.50", + "update_share_network_subnet_from_metadata": "0.5", + }, ), ] @@ -326,7 +328,7 @@ def ensure_share(self, context, share, share_server=None): @slow_me_down def update_access(self, context, share, access_rules, add_rules, - delete_rules, share_server=None): + delete_rules, update_rules, share_server=None): """Update access rules for given share.""" for rule in add_rules + access_rules: share_proto = share["share_proto"].lower() @@ -1054,3 +1056,20 @@ def 
restore_backup_continue(self, context, backup, share_instance, {'backup': backup['id'], 'share': share_instance['share_id']}) return {'total_progress': '100'} + + def update_share_from_metadata(self, context, share_instance, metadata, + share_server=None): + LOG.debug("Updated share %(share)s. Metadata %(metadata)s " + "applied successfully.", + {'share': share_instance['share_id'], + 'metadata': metadata}) + + @slow_me_down + def update_share_network_subnet_from_metadata(self, context, + share_network, + share_network_subnet, + share_servers, metadata): + LOG.debug("Updated share network subnet %(sn_sub)s. Metadata " + "%(metadata)s applied successfully.", + {'sn_sub': share_network_subnet['id'], + 'metadata': metadata}) diff --git a/manila/tests/share/drivers/glusterfs/test_layout.py b/manila/tests/share/drivers/glusterfs/test_layout.py index 327f5f0c1f..454132cf58 100644 --- a/manila/tests/share/drivers/glusterfs/test_layout.py +++ b/manila/tests/share/drivers/glusterfs/test_layout.py @@ -159,7 +159,8 @@ def test_update_access(self, inset, outset, recovery): ] for r in rs ] for rs in (inset, outset)) - _driver.update_access(self.fake_context, self.fake_share, *in_rules) + _driver.update_access( + self.fake_context, self.fake_share, *in_rules, []) _layout._share_manager.assert_called_once_with(self.fake_share) _driver._update_access_via_manager.assert_called_once_with( diff --git a/manila/tests/share/drivers/hitachi/hnas/test_driver.py b/manila/tests/share/drivers/hitachi/hnas/test_driver.py index e9d627a984..7819f99d30 100644 --- a/manila/tests/share/drivers/hitachi/hnas/test_driver.py +++ b/manila/tests/share/drivers/hitachi/hnas/test_driver.py @@ -280,7 +280,8 @@ def test_update_access_nfs(self, empty_rules): self.mock_object(ssh.HNASSSHBackend, "update_nfs_access_rule", mock.Mock()) - self._driver.update_access('context', share_nfs, access_list, [], []) + self._driver.update_access('context', share_nfs, access_list, + [], [], []) 
ssh.HNASSSHBackend.update_nfs_access_rule.assert_called_once_with( access_list_updated, share_id=share_nfs['id']) @@ -301,7 +302,7 @@ def test_update_access_ip_exception(self): self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, 'context', share_nfs, - access_list, [], []) + access_list, [], [], []) def test_update_access_not_found_exception(self): access1 = { @@ -321,7 +322,8 @@ def test_update_access_not_found_exception(self): self.assertRaises(exception.ShareResourceNotFound, self._driver.update_access, 'context', share_nfs, - access_list, add_rules=[], delete_rules=[]) + access_list, add_rules=[], delete_rules=[], + update_rules=[]) @ddt.data([access_cifs_rw, 'acr'], [access_cifs_ro, 'ar']) @ddt.unpack @@ -331,7 +333,7 @@ def test_allow_access_cifs(self, access_cifs, permission): self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access') self._driver.update_access('context', share_cifs, [], - access_list_allow, []) + access_list_allow, [], []) ssh.HNASSSHBackend.cifs_allow_access.assert_called_once_with( share_cifs['id'], 'fake_user', permission, is_snapshot=False) @@ -349,7 +351,7 @@ def test_allow_access_cifs_invalid_type(self): self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, 'context', share_cifs, - [], access_list_allow, []) + [], access_list_allow, [], []) def test_deny_access_cifs(self): access_list_deny = [access_cifs_rw] @@ -357,7 +359,7 @@ def test_deny_access_cifs(self): self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access') self._driver.update_access('context', share_cifs, [], [], - access_list_deny) + access_list_deny, []) ssh.HNASSSHBackend.cifs_deny_access.assert_called_once_with( share_cifs['id'], 'fake_user', is_snapshot=False) @@ -376,14 +378,14 @@ def test_deny_access_cifs_unsupported_type(self): self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access') self._driver.update_access('context', share_cifs, [], [], - access_list_deny) + access_list_deny, []) 
self.assertTrue(self.mock_log.warning.called) def test_update_access_invalid_share_protocol(self): self.mock_object(self._driver, '_ensure_share') ex = self.assertRaises(exception.ShareBackendException, self._driver.update_access, 'context', - invalid_share, [], [], []) + invalid_share, [], [], [], []) self.assertEqual(invalid_protocol_msg, ex.msg) def test_update_access_cifs_recovery_mode(self): @@ -395,7 +397,8 @@ def test_update_access_cifs_recovery_mode(self): self.mock_object(ssh.HNASSSHBackend, 'cifs_deny_access') self.mock_object(ssh.HNASSSHBackend, 'cifs_allow_access') - self._driver.update_access('context', share_cifs, access_list, [], []) + self._driver.update_access('context', share_cifs, access_list, + [], [], []) ssh.HNASSSHBackend.list_cifs_permissions.assert_called_once_with( share_cifs['id']) diff --git a/manila/tests/share/drivers/hitachi/hsp/test_driver.py b/manila/tests/share/drivers/hitachi/hsp/test_driver.py index 1df887ab18..c1f5f27335 100644 --- a/manila/tests/share/drivers/hitachi/hsp/test_driver.py +++ b/manila/tests/share/drivers/hitachi/hsp/test_driver.py @@ -81,7 +81,7 @@ def test_update_access_add(self, add_rule): side_effect=add_rule)) self._driver.update_access('context', self.fake_share_instance, [], - access_list, []) + access_list, [], []) self.assertTrue(self.mock_log.debug.called) @@ -113,7 +113,7 @@ def test_update_access_add_exception(self): self.assertRaises(exception.HSPBackendException, self._driver.update_access, 'context', - self.fake_share_instance, [], access_list, []) + self.fake_share_instance, [], access_list, [], []) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) @@ -147,7 +147,7 @@ def test_update_access_recovery(self): self.mock_object(rest.HSPRestBackend, "add_access_rule") self._driver.update_access('context', self.fake_share_instance, - access_list, [], []) + access_list, [], [], []) self.assertTrue(self.mock_log.debug.called) @@ -191,7 +191,7 @@ def 
test_update_access_delete(self, delete_rule): mock.Mock(return_value=fakes.hsp_rules)) self._driver.update_access('context', self.fake_share_instance, [], [], - delete_rules) + delete_rules, []) self.assertTrue(self.mock_log.debug.called) @@ -231,7 +231,7 @@ def test_update_access_delete_exception(self): self.assertRaises(exception.HSPBackendException, self._driver.update_access, 'context', - self.fake_share_instance, [], [], delete_rules) + self.fake_share_instance, [], [], delete_rules, []) self.assertTrue(self.mock_log.debug.called) @@ -262,9 +262,9 @@ def test_update_access_ip_exception(self, is_recovery): mock.Mock(return_value=fakes.hsp_rules)) if is_recovery: - access_args = [access_list, [], []] + access_args = [access_list, [], [], []] else: - access_args = [[], access_list, []] + access_args = [[], access_list, [], []] self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, 'context', @@ -287,7 +287,7 @@ def test_update_access_not_found_exception(self): self.assertRaises(exception.ShareResourceNotFound, self._driver.update_access, 'context', - self.fake_share_instance, access_list, [], []) + self.fake_share_instance, access_list, [], [], []) rest.HSPRestBackend.get_file_system.assert_called_once_with( self.fake_share_instance['id']) diff --git a/manila/tests/share/drivers/ibm/test_gpfs.py b/manila/tests/share/drivers/ibm/test_gpfs.py index 58a9dd437c..3484e38e71 100644 --- a/manila/tests/share/drivers/ibm/test_gpfs.py +++ b/manila/tests/share/drivers/ibm/test_gpfs.py @@ -460,6 +460,7 @@ def test_update_access_allow(self): ["ignored"], [self.access], [], + [], share_server=None) self._helper_fake.allow_access.assert_called_once_with( @@ -478,6 +479,7 @@ def test_update_access_deny(self): ["ignored"], [], [self.access], + [], share_server=None) self._helper_fake.deny_access.assert_called_once_with( @@ -500,6 +502,7 @@ def test_update_access_both(self): ["ignore"], [access_1], [access_2], + [], share_server=None) 
self.assertFalse(self._helper_fake.resync_access.called) @@ -524,6 +527,7 @@ def test_update_access_resync(self): [access_1, access_2], [], [], + [], share_server=None) self._helper_fake.resync_access.assert_called_once_with( diff --git a/manila/tests/share/drivers/infinidat/test_infinidat.py b/manila/tests/share/drivers/infinidat/test_infinidat.py index b0408a3cc4..3049389a2a 100644 --- a/manila/tests/share/drivers/infinidat/test_infinidat.py +++ b/manila/tests/share/drivers/infinidat/test_infinidat.py @@ -724,7 +724,7 @@ def test_update_access(self): {'access_level': constants.ACCESS_LEVEL_RO, 'access_to': '5.6.7.8/28', 'access_type': 'ip'}] - self.driver.update_access(None, test_share, access_rules, [], []) + self.driver.update_access(None, test_share, access_rules, [], [], []) permissions = self._mock_filesystem.get_exports()[0].get_permissions() # now we are supposed to have three permissions: @@ -763,7 +763,7 @@ def test_update_access_share_doesnt_exist(self): 'access_type': 'ip'}] self.assertRaises(exception.ShareResourceNotFound, self.driver.update_access, None, test_share, - access_rules, [], []) + access_rules, [], [], []) def test_update_access_api_fail(self): self._mock_filesystem.get_exports.side_effect = self._raise_infinisdk @@ -779,7 +779,7 @@ def test_update_access_api_fail(self): 'access_type': 'ip'}] self.assertRaises(exception.ShareBackendException, self.driver.update_access, None, test_share, - access_rules, [], []) + access_rules, [], [], []) def test_update_access_fails_non_ip_access_type(self): access_rules = [ @@ -788,7 +788,7 @@ def test_update_access_fails_non_ip_access_type(self): 'access_type': 'user'}] self.assertRaises(exception.InvalidShareAccess, self.driver.update_access, None, test_share, - access_rules, [], []) + access_rules, [], [], []) def test_update_access_fails_invalid_ip(self): access_rules = [ @@ -797,7 +797,7 @@ def test_update_access_fails_invalid_ip(self): 'access_type': 'ip'}] self.assertRaises(ValueError, 
self.driver.update_access, None, test_share, - access_rules, [], []) + access_rules, [], [], []) def test_snapshot_update_access(self): access_rules = [ diff --git a/manila/tests/share/drivers/inspur/as13000/test_as13000_nas.py b/manila/tests/share/drivers/inspur/as13000/test_as13000_nas.py index 7f15100f0e..27d8ff0d76 100644 --- a/manila/tests/share/drivers/inspur/as13000/test_as13000_nas.py +++ b/manila/tests/share/drivers/inspur/as13000/test_as13000_nas.py @@ -692,10 +692,10 @@ def test_update_access(self, share_proto, use_access): 'send_rest_api') if use_access: self.driver.update_access(self._ctxt, share_instance, - access_rules, [], []) + access_rules, [], [], []) else: self.driver.update_access(self._ctxt, share_instance, - [], add_rules, del_rules) + [], add_rules, del_rules, []) access_clients = [{'name': rule['access_to'], 'type': 0 if share_proto == 'nfs' else 1, diff --git a/manila/tests/share/drivers/inspur/instorage/test_instorage.py b/manila/tests/share/drivers/inspur/instorage/test_instorage.py index 1c3af3624c..d7e81d70e0 100644 --- a/manila/tests/share/drivers/inspur/instorage/test_instorage.py +++ b/manila/tests/share/drivers/inspur/instorage/test_instorage.py @@ -243,7 +243,8 @@ def test_update_access(self): instorage.InStorageAssistant, 'update_access' ) - self.driver.update_access(self._ctxt, self.share_instance, [], [], []) + self.driver.update_access( + self._ctxt, self.share_instance, [], [], [], []) mock_ua.assert_called_once_with( 'fakeinstanceid', 'fake_proto', [], [], [] diff --git a/manila/tests/share/drivers/macrosan/test_macrosan_nas.py b/manila/tests/share/drivers/macrosan/test_macrosan_nas.py index 66bbe4b666..0cbe5ea564 100644 --- a/manila/tests/share/drivers/macrosan/test_macrosan_nas.py +++ b/manila/tests/share/drivers/macrosan/test_macrosan_nas.py @@ -917,7 +917,7 @@ def test_update_access_add_delete(self): self.mock_object(macrosan_helper.MacrosanHelper, '_deny_access') self.driver.update_access(self._context, share, - None, 
add_rules, delete_rules) + None, add_rules, delete_rules, None) @ddt.data('nfs', 'cifs') def test_update_access_nfs(self, proto): @@ -942,7 +942,7 @@ def test_update_access_nfs(self, proto): self.mock_object(macrosan_helper.MacrosanHelper, '_allow_access') self.driver.update_access(self._context, share, - access_rules, {}, {}) + access_rules, {}, {}, {}) mock_ca.assert_called_once_with(share, None) def test_update_access_fail(self): @@ -959,7 +959,7 @@ def test_update_access_fail(self): mock.Mock(side_effect=exception.InvalidShareAccess( reason='fake_exception'))) result = self.driver.update_access(self._context, share, - access_rules, None, None) + access_rules, None, None, None) expect = { 'fakeid': { 'state': 'error', @@ -983,7 +983,7 @@ def test_update_access_add_fail(self): self.mock_object(macrosan_helper.MacrosanHelper, '_deny_access') result = self.driver.update_access(self._context, share, - None, add_rules, delete_rules) + None, add_rules, delete_rules, None) expect = { 'fakeid': { 'state': 'error' diff --git a/manila/tests/share/drivers/maprfs/test_maprfs.py b/manila/tests/share/drivers/maprfs/test_maprfs.py index aecbb27fc8..eaef7d8795 100644 --- a/manila/tests/share/drivers/maprfs/test_maprfs.py +++ b/manila/tests/share/drivers/maprfs/test_maprfs.py @@ -299,7 +299,7 @@ def test_update_access_add(self): self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.update_access(self._context, self.share, [self.access], - [self.access], []) + [self.access], [], []) self._driver._maprfs_util._execute.assert_any_call( self.maprcli_bin, 'volume', 'modify', '-name', volume, '-readAce', @@ -321,7 +321,7 @@ def test_update_access_add_no_user_no_group_exists(self): self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.update_access(self._context, self.share, [self.access], - [self.access], []) + [self.access], [], []) self._driver._maprfs_util._execute.assert_any_call( self.maprcli_bin, 'volume', 'modify', 
'-name', volume, '-readAce', @@ -342,7 +342,7 @@ def test_update_access_delete(self): self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.update_access(self._context, self.share, [], [], - [self.access]) + [self.access], []) self._driver._maprfs_util._execute.assert_any_call( self.maprcli_bin, 'volume', 'modify', '-name', volume, '-readAce', @@ -364,7 +364,7 @@ def test_update_access_recover(self): self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.update_access(self._context, self.share, [self.access], - [], []) + [], [], []) self._driver._maprfs_util._execute.assert_any_call( self.maprcli_bin, 'volume', 'modify', '-name', volume, '-readAce', @@ -378,7 +378,7 @@ def test_update_access_share_not_exists(self): self._driver._maprfs_util._execute = mock.Mock(return_value=('', 0)) self._driver.update_access(self._context, self.share, [self.access], - [], []) + [], [], []) self._driver._maprfs_util._execute.assert_not_called() @@ -397,7 +397,7 @@ def test_update_access_exception(self): self.assertRaises(exception.MapRFSException, self._driver.update_access, self._context, - self.share, [self.access], [], []) + self.share, [self.access], [], [], []) def test_update_access_invalid_access(self): access = fake_share.fake_access(access_type='ip', access_to='fake', @@ -405,7 +405,7 @@ def test_update_access_invalid_access(self): self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, self._context, - self.share, [access], [], []) + self.share, [access], [], [], []) def test_ensure_share(self): self._driver._maprfs_util.volume_exists = mock.Mock( diff --git a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py index 5a7dc77fe0..5c699f7d27 100644 --- a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py +++ b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py @@ -77,6 +77,9 @@ LANGUAGE = 'fake_language' 
SNAPSHOT_POLICY_NAME = 'fake_snapshot_policy' EXPORT_POLICY_NAME = 'fake_export_policy' +VOLUME_EFFICIENCY_POLICY_NAME = 'fake_volume_efficiency_policy' +SHARE_MOUNT_POINT = 'fake_mount_point' + DELETED_EXPORT_POLICIES = { VSERVER_NAME: [ 'deleted_manila_fake_policy_1', @@ -320,6 +323,22 @@ 'aggr2': SHARE_AGGREGATE_NAMES[1], }) +VSERVER_SHOW_AGGR_GET_RESPONSE = etree.XML(""" + + + + fake_aggr + ssd + 3393178406912 + false + compliance + os_vs + + + 1 + +""") + SECURITY_CERT_GET_RESPONSE = etree.XML(""" @@ -1269,7 +1288,8 @@ online - false + true + enterprise 0 @@ -1391,7 +1411,8 @@ online - false + true + compliance 0 @@ -1515,7 +1536,8 @@ online - false + true + compliance 0 @@ -2357,6 +2379,9 @@ %(qos-policy-group-name)s + + compliance + 1 @@ -2393,6 +2418,9 @@ %(qos-policy-group-name)s + + compliance + 1 @@ -2424,6 +2452,9 @@ %(size)s %(size-used)s + + compliance + 1 @@ -3054,6 +3085,31 @@ false """ +SNAPLOCK_CLOCK_CONFIG_1 = etree.XML(""" + + + + %(clock_info)s + 1723063070 + + + """ % { + 'clock_info': 'Wed Aug 07 16:37:50' +}) + + +SNAPLOCK_CLOCK_CONFIG_2 = etree.XML(""" + + + + %(clock_info)s + 1723063070 + + + """ % { + 'clock_info': 'not configured' +}) + FAKE_XML1 = """\ abc\ abc\ @@ -3595,6 +3651,8 @@ "home_node": { "name": "fake_home_node_name" }, + "snaplock_type": "enterprise", + "is_snaplock": True, "space": { "footprint": 702764609536, "footprint_percent": 55, @@ -3731,7 +3789,7 @@ }, "home_port": { "name": PORT - } + }, } } ], @@ -3764,6 +3822,9 @@ "size": 21474836480, 'used': SHARE_USED_SIZE, }, + "snaplock": { + "type": "compliance" + } } ], "num_records": 1, @@ -4811,6 +4872,9 @@ 'space': { 'size': SHARE_SIZE, 'used': SHARE_USED_SIZE, + }, + 'snaplock': { + 'type': "compliance" } } ], @@ -4842,3 +4906,22 @@ } ], } + +STORAGE_FAIL_OVER_PARTNER = etree.XML(""" + + fake_partner_node + """) + + +DATA_LIF_CAPACITY_DETAILS = etree.XML(""" + + + + 512 + 44 + 512 + fake_node + + + 1 + """) diff --git 
a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py index 75cb435bdb..70b35627ad 100644 --- a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py +++ b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py @@ -2534,6 +2534,19 @@ def test_setup_security_services_invalid(self): self.client.send_request.assert_has_calls([ mock.call('vserver-modify', vserver_modify_args)]) + def test_update_showmount(self): + + self.mock_object(self.client, 'send_request') + + fake_showmount = 'true' + self.client.update_showmount(fake_showmount) + + nfs_service_modify_args = { + 'showmount': fake_showmount, + } + self.client.send_request.assert_called_once_with( + 'nfs-service-modify', nfs_service_modify_args) + @ddt.data({'tcp-max-xfer-size': 10000}, {}, None) def test_enable_nfs(self, nfs_config): @@ -2975,7 +2988,91 @@ def test_is_kerberos_enabled(self): self.assertTrue(result) self.client.send_request.assert_called_once_with( 'kerberos-config-get', kerberos_config_get_args) - self.client.get_network_interfaces.assert_called_once() + self.client.get_network_interfaces.assert_called_once_with( + protocols=['NFS', 'CIFS']) + + def test_is_kerberos_enabled_exception_raise(self): + self.client.features.add_feature('KERBEROS_VSERVER') + api_response = netapp_api.NaElement( + fake.KERBEROS_CONFIG_GET_RESPONSE) + self.mock_object(self.client, 'send_request', + mock.Mock(side_effect=[api_response, + netapp_api.NaApiError('foobar')])) + self.mock_object(self.client, + 'get_network_interfaces', + mock.Mock(return_value=[{'interface-name': 'lif1'}, + {'interface-name': 'lif2'}, + {'interface-name': 'lif3'}])) + + self.assertRaises(netapp_api.NaApiError, + self.client.is_kerberos_enabled) + + kerberos_config_get_args_lif1 = { + 'interface-name': 'lif1', + 'desired-attributes': { + 'kerberos-config-info': { + 'is-kerberos-enabled': None, + } + } + } + + 
kerberos_config_get_args_lif2 = { + 'interface-name': 'lif2', + 'desired-attributes': { + 'kerberos-config-info': { + 'is-kerberos-enabled': None, + } + } + } + + self.client.send_request.assert_has_calls([ + mock.call('kerberos-config-get', kerberos_config_get_args_lif1), + mock.call('kerberos-config-get', kerberos_config_get_args_lif2), + ]) + self.client.get_network_interfaces.assert_called_once_with( + protocols=['NFS', 'CIFS']) + + def test_is_kerberos_enabled_exception_return_false(self): + self.client.features.add_feature('KERBEROS_VSERVER') + api_response = netapp_api.NaElement( + fake.KERBEROS_CONFIG_GET_RESPONSE) + self.mock_object( + self.client, 'send_request', + mock.Mock(side_effect=[api_response, netapp_api.NaApiError( + message="entry doesn't exist")])) + self.mock_object(self.client, + 'get_network_interfaces', + mock.Mock(return_value=[{'interface-name': 'lif1'}, + {'interface-name': 'lif2'}, + {'interface-name': 'lif3'}])) + + result = self.client.is_kerberos_enabled() + + kerberos_config_get_args_lif1 = { + 'interface-name': 'lif1', + 'desired-attributes': { + 'kerberos-config-info': { + 'is-kerberos-enabled': None, + } + } + } + + kerberos_config_get_args_lif2 = { + 'interface-name': 'lif2', + 'desired-attributes': { + 'kerberos-config-info': { + 'is-kerberos-enabled': None, + } + } + } + + self.assertFalse(result) + self.client.send_request.assert_has_calls([ + mock.call('kerberos-config-get', kerberos_config_get_args_lif1), + mock.call('kerberos-config-get', kerberos_config_get_args_lif2), + ]) + self.client.get_network_interfaces.assert_called_once_with( + protocols=['NFS', 'CIFS']) def test_get_kerberos_service_principal_name(self): @@ -3203,10 +3300,13 @@ def test_create_volume(self, set_max_files): self.mock_object( self.client, '_get_create_volume_api_args', mock.Mock(return_value={})) + options = {'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} self.client.create_volume( fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100, - 
max_files=fake.MAX_FILES if set_max_files else None) + max_files=fake.MAX_FILES if set_max_files else None, + **options + ) volume_create_args = { 'containing-aggr-name': fake.SHARE_AGGREGATE_NAME, @@ -3216,11 +3316,15 @@ def test_create_volume(self, set_max_files): self.client._get_create_volume_api_args.assert_called_once_with( fake.SHARE_NAME, False, None, None, None, 'rw', None, False, - None, None) + None, None, None) self.client.send_request.assert_called_with('volume-create', volume_create_args) - (self.client.update_volume_efficiency_attributes. - assert_called_once_with(fake.SHARE_NAME, False, False)) + ( + self.client.update_volume_efficiency_attributes. + assert_called_once_with + (fake.SHARE_NAME, False, False, + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME) + ) if set_max_files: self.client.set_volume_max_files.assert_called_once_with( fake.SHARE_NAME, fake.MAX_FILES) @@ -3250,6 +3354,31 @@ def test_create_volume_thin_provisioned(self, thin_provisioned): self.client.send_request.assert_called_once_with('volume-create', volume_create_args) + @ddt.data("compliance", "enterprise") + def test_create_volume_snaplock_type(self, snaplock_type): + + self.mock_object(self.client, 'send_request') + self.mock_object(self.client, 'update_volume_efficiency_attributes') + self.mock_object(self.client, 'set_snaplock_attributes') + + self.client.create_volume( + fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100, + snaplock_type=snaplock_type) + + volume_create_args = { + 'containing-aggr-name': fake.SHARE_AGGREGATE_NAME, + 'size': '100g', + 'volume': fake.SHARE_NAME, + 'volume-type': 'rw', + 'junction-path': '/%s' % fake.SHARE_NAME, + 'space-reserve': 'volume', + 'encrypt': 'false', + 'snaplock-type': snaplock_type, + } + + self.client.send_request.assert_called_once_with('volume-create', + volume_create_args) + def test_create_volume_adaptive_not_supported(self): self.client.features.add_feature('ADAPTIVE_QOS', supported=False) @@ -3293,7 +3422,7 @@ def 
test_create_volume_async(self, auto_provisioned): self.client._get_create_volume_api_args.assert_called_once_with( fake.SHARE_NAME, False, None, None, None, 'rw', None, False, - None, None) + None, None, None) self.client.send_request.assert_called_with('volume-create-async', volume_create_args) self.assertEqual(expected_result, result) @@ -3490,6 +3619,32 @@ def test_is_flexvol_encrypted_8_x_system_version_response(self): self.assertFalse(result) + def test_update_volume_snapshot_policy(self): + self.mock_object(self.client, 'send_request') + + self.client.update_volume_snapshot_policy(fake.SHARE_NAME, + fake.SNAPSHOT_POLICY_NAME) + + volume_modify_iter_api_args = { + 'query': { + 'volume-attributes': { + 'volume-id-attributes': { + 'name': fake.SHARE_NAME, + }, + }, + }, + 'attributes': { + 'volume-attributes': { + 'volume-snapshot-attributes': { + 'snapshot-policy': fake.SNAPSHOT_POLICY_NAME, + }, + }, + }, + } + + self.client.send_request.assert_called_once_with( + 'volume-modify-iter', volume_modify_iter_api_args) + def test_enable_dedup(self): self.mock_object(self.client, 'send_request') @@ -3629,6 +3784,50 @@ def test_disable_compression_async(self): self.client.connection.send_request.assert_called_once_with( 'sis-set-config-async', sis_set_config_args) + def test_apply_volume_efficiency_policy_with_policy(self): + self.mock_object(self.client, 'send_request') + self.client.apply_volume_efficiency_policy( + fake.SHARE_NAME, fake.VOLUME_EFFICIENCY_POLICY_NAME + ) + + volume_efficiency_config_args = { + 'path': '/vol/%s' % fake.SHARE_NAME, + 'policy-name': fake.VOLUME_EFFICIENCY_POLICY_NAME + } + + self.client.send_request.assert_called_once_with( + 'sis-set-config', volume_efficiency_config_args) + + def test_apply_volume_efficiency_policy_without_policy(self): + self.mock_object(self.client, 'send_request') + self.client.apply_volume_efficiency_policy( + fake.SHARE_NAME, None + ) + + self.client.send_request.assert_not_called() + + def 
test_apply_volume_efficiency_policy_async_with_policy(self): + self.mock_object(self.client.connection, 'send_request') + self.client.apply_volume_efficiency_policy_async( + fake.SHARE_NAME, fake.VOLUME_EFFICIENCY_POLICY_NAME + ) + + volume_efficiency_config_args = { + 'path': '/vol/%s' % fake.SHARE_NAME, + 'policy-name': fake.VOLUME_EFFICIENCY_POLICY_NAME + } + + self.client.connection.send_request.assert_called_once_with( + 'sis-set-config-async', volume_efficiency_config_args) + + def test_apply_volume_efficiency_policy_async_without_policy(self): + self.mock_object(self.client.connection, 'send_request') + self.client.apply_volume_efficiency_policy_async( + fake.SHARE_NAME + ) + + self.client.connection.send_request.assert_not_called() + def test_get_volume_efficiency_status(self): api_response = netapp_api.NaElement(fake.SIS_GET_ITER_RESPONSE) @@ -3729,6 +3928,8 @@ def test_modify_volume_no_optional_args(self, is_flexgroup): self.mock_object(self.client, 'send_request') mock_update_volume_efficiency_attributes = self.mock_object( self.client, 'update_volume_efficiency_attributes') + self.mock_object(self.client, '_is_snaplock_enabled_volume', + mock.Mock(return_value=True)) aggr = fake.SHARE_AGGREGATE_NAME if is_flexgroup: @@ -3769,7 +3970,9 @@ def test_modify_volume_no_optional_args(self, is_flexgroup): self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_api_args) mock_update_volume_efficiency_attributes.assert_called_once_with( - fake.SHARE_NAME, False, False, is_flexgroup=is_flexgroup) + fake.SHARE_NAME, False, False, + is_flexgroup=is_flexgroup, efficiency_policy=None + ) @ddt.data((fake.QOS_POLICY_GROUP_NAME, None), (None, fake.ADAPTIVE_QOS_POLICY_GROUP_NAME)) @@ -3780,6 +3983,9 @@ def test_modify_volume_all_optional_args(self, qos_group, self.mock_object(self.client, 'send_request') mock_update_volume_efficiency_attributes = self.mock_object( self.client, 'update_volume_efficiency_attributes') + options = 
{'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} + self.mock_object(self.client, '_is_snaplock_enabled_volume', + mock.Mock(return_value=True)) self.client.modify_volume( fake.SHARE_AGGREGATE_NAME, @@ -3793,7 +3999,9 @@ def test_modify_volume_all_optional_args(self, qos_group, qos_policy_group=qos_group, adaptive_qos_policy_group=adaptive_qos_group, autosize_attributes=fake.VOLUME_AUTOSIZE_ATTRS, - hide_snapdir=True) + hide_snapdir=True, + **options + ) volume_modify_iter_api_args = { 'query': { @@ -3843,30 +4051,52 @@ def test_modify_volume_all_optional_args(self, qos_group, self.client.send_request.assert_called_once_with( 'volume-modify-iter', volume_modify_iter_api_args) mock_update_volume_efficiency_attributes.assert_called_once_with( - fake.SHARE_NAME, True, False, is_flexgroup=False) + fake.SHARE_NAME, True, False, + is_flexgroup=False, + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME + ) @ddt.data( - {'existing': (True, True), 'desired': (True, True), 'fg': False}, - {'existing': (True, True), 'desired': (False, False), 'fg': False}, - {'existing': (True, True), 'desired': (True, False), 'fg': False}, - {'existing': (True, False), 'desired': (True, False), 'fg': False}, - {'existing': (True, False), 'desired': (False, False), 'fg': False}, - {'existing': (True, False), 'desired': (True, True), 'fg': False}, - {'existing': (False, False), 'desired': (False, False), 'fg': False}, - {'existing': (False, False), 'desired': (True, False), 'fg': False}, - {'existing': (False, False), 'desired': (True, True), 'fg': False}, - {'existing': (True, True), 'desired': (True, True), 'fg': True}, - {'existing': (True, True), 'desired': (False, False), 'fg': True}, - {'existing': (True, True), 'desired': (True, False), 'fg': True}, - {'existing': (True, False), 'desired': (True, False), 'fg': True}, - {'existing': (True, False), 'desired': (False, False), 'fg': True}, - {'existing': (True, False), 'desired': (True, True), 'fg': True}, - {'existing': (False, 
False), 'desired': (False, False), 'fg': True}, - {'existing': (False, False), 'desired': (True, False), 'fg': True}, - {'existing': (False, False), 'desired': (True, True), 'fg': True}, + {'existing': (True, True), 'desired': (True, True), 'fg': False, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, True), 'desired': (False, False), 'fg': False, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, True), 'desired': (True, False), 'fg': False, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, False), 'desired': (True, False), 'fg': False, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, False), 'desired': (False, False), 'fg': False, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, False), 'desired': (True, True), 'fg': False, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (False, False), 'desired': (False, False), 'fg': False, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (False, False), 'desired': (True, False), 'fg': False, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (False, False), 'desired': (True, True), 'fg': False, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, True), 'desired': (True, True), 'fg': True, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, True), 'desired': (False, False), 'fg': True, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, True), 'desired': (True, False), 'fg': True, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, False), 'desired': (True, False), 'fg': True, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (True, False), 'desired': (False, False), 'fg': True, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + 
{'existing': (True, False), 'desired': (True, True), 'fg': True, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (False, False), 'desired': (False, False), 'fg': True, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (False, False), 'desired': (True, False), 'fg': True, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, + {'existing': (False, False), 'desired': (True, True), 'fg': True, + 'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME}, ) @ddt.unpack - def test_update_volume_efficiency_attributes(self, existing, desired, fg): + def test_update_volume_efficiency_attributes(self, existing, desired, fg, + efficiency_policy): existing_dedupe = existing[0] existing_compression = existing[1] @@ -3892,10 +4122,17 @@ def test_update_volume_efficiency_attributes(self, existing, desired, fg): mock_disable_dedup = self.mock_object(self.client, 'disable_dedup') mock_disable_dedup_async = self.mock_object(self.client, 'disable_dedupe_async') + mock_apply_volume_efficiency_policy = ( + self.mock_object(self.client, 'apply_volume_efficiency_policy')) + mock_apply_volume_efficiency_policy_async = ( + self.mock_object(self.client, + 'apply_volume_efficiency_policy_async' + ) + ) self.client.update_volume_efficiency_attributes( fake.SHARE_NAME, desired_dedupe, desired_compression, - is_flexgroup=fg) + is_flexgroup=fg, efficiency_policy=efficiency_policy) if existing_dedupe == desired_dedupe: if fg: @@ -3941,6 +4178,11 @@ def test_update_volume_efficiency_attributes(self, existing, desired, fg): self.assertTrue(mock_enable_compression.called) self.assertFalse(mock_disable_compression.called) + if fg: + self.assertTrue(mock_apply_volume_efficiency_policy_async.called) + else: + self.assertTrue(mock_apply_volume_efficiency_policy.called) + def test_set_volume_size(self): api_response = netapp_api.NaElement(fake.VOLUME_MODIFY_ITER_RESPONSE) @@ -4379,6 +4621,9 @@ def test_get_volume(self, is_flexgroup): 
'volume-qos-attributes': { 'policy-group-name': None, }, + 'volume-snaplock-attributes': { + 'snaplock-type': None, + }, }, }, } @@ -4397,6 +4642,7 @@ def test_get_volume(self, is_flexgroup): 'style-extended': (fake.FLEXGROUP_STYLE_EXTENDED if is_flexgroup else fake.FLEXVOL_STYLE_EXTENDED), + 'snaplock-type': 'compliance', } self.client.send_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) @@ -4440,6 +4686,9 @@ def test_get_volume_no_qos(self): 'volume-qos-attributes': { 'policy-group-name': None, }, + 'volume-snaplock-attributes': { + 'snaplock-type': None, + }, }, }, } @@ -4456,6 +4705,7 @@ def test_get_volume_no_qos(self): 'owning-vserver-name': fake.VSERVER_NAME, 'qos-policy-group-name': None, 'style-extended': fake.FLEXVOL_STYLE_EXTENDED, + 'snaplock-type': "compliance", } self.client.send_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) @@ -6045,7 +6295,7 @@ def test_get_aggregate_none_specified(self): self.assertEqual({}, result) def test_get_aggregate(self): - + self.client.features.SNAPLOCK = True api_response = netapp_api.NaElement( fake.AGGR_GET_ITER_SSC_RESPONSE).get_child_by_name( 'attributes-list').get_children() @@ -6054,7 +6304,6 @@ def test_get_aggregate(self): mock.Mock(return_value=api_response)) result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME) - desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, @@ -6068,6 +6317,11 @@ def test_get_aggregate(self): }, }, } + + if self.client.features.SNAPLOCK: + desired_attributes['aggr-attributes']['aggr-snaplock-attributes']\ + = {'is-snaplock': None, 'snaplock-type': None} + self.client._get_aggregates.assert_has_calls([ mock.call( aggregate_names=[fake.SHARE_AGGREGATE_NAME], @@ -6078,6 +6332,8 @@ def test_get_aggregate(self): 'raid-type': 'raid_dp', 'is-hybrid': False, 'is-home': True, + 'snaplock-type': 'compliance', + 'is-snaplock': 'true' } self.assertEqual(expected, result) @@ -9258,7 +9514,6 @@ def 
test_snapmirror_restore_vol(self): {'snapmirror_label': "fake_backup", 'newer_than': None}) @ddt.unpack def test_list_volume_snapshots(self, snapmirror_label, newer_than): - print(f"snapmirror_label: {snapmirror_label}") api_response = netapp_api.NaElement( fake.SNAPSHOT_GET_ITER_SNAPMIRROR_RESPONSE) self.mock_object(self.client, @@ -9287,3 +9542,190 @@ def test_list_volume_snapshots(self, snapmirror_label, newer_than): expected = [fake.SNAPSHOT_NAME] self.assertEqual(expected, result) + + def test_is_snaplock_compliance_clock_configured(self): + api_response = netapp_api.NaElement(fake.SNAPLOCK_CLOCK_CONFIG_1) + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=api_response)) + result = self.client.is_snaplock_compliance_clock_configured( + "fake_node", + ) + self.assertIs(True, result) + + def test_is_snaplock_compliance_clock_configured_negative(self): + api_response = netapp_api.NaElement(fake.SNAPLOCK_CLOCK_CONFIG_2) + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=api_response)) + result = self.client.is_snaplock_compliance_clock_configured( + "fake_node" + ) + self.assertIs(False, result) + + def test_is_snaplock_compliance_clock_configured_none(self): + api_response = netapp_api.NaElement(fake.SNAPLOCK_CLOCK_CONFIG_1) + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=api_response)) + self.mock_object(api_response, + 'get_child_by_name', + mock.Mock(return_value=None)) + self.assertRaises( + exception.NetAppException, + self.client.is_snaplock_compliance_clock_configured, + "node1" + ) + + @ddt.data({'options': {'snaplock_autocommit_period': "4hours", + 'snaplock_min_retention_period': "6days", + 'snaplock_max_retention_period': "8months", + 'snaplock_default_retention_period': "8days"}, + }, + {'options': {'snaplock_autocommit_period': "4hours", + 'snaplock_min_retention_period': "6days", + 'snaplock_max_retention_period': "8months", + 'snaplock_default_retention_period': "min"}, + }, 
+ {'options': {'snaplock_autocommit_period': "4hours", + 'snaplock_min_retention_period': "6days", + 'snaplock_max_retention_period': "8months", + 'snaplock_default_retention_period': "max"}, + }, + ) + @ddt.unpack + def test_set_snaplock_attributes(self, options): + api_args = { + 'volume': fake.SHARE_NAME, + 'autocommit-period': options.get('snaplock_autocommit_period'), + 'minimum-retention-period': options.get( + 'snaplock_min_retention_period'), + 'maximum-retention-period': options.get( + 'snaplock_max_retention_period'), + 'default-retention-period': options.get( + 'snaplock_default_retention_period'), + } + if options.get('snaplock_default_retention_period') == "min": + api_args['default-retention-period'] = options.get( + 'snaplock_min_retention_period') + elif options.get('snaplock_default_retention_period') == 'max': + api_args['default-retention-period'] = options.get( + 'snaplock_max_retention_period') + self.mock_object(self.client, 'send_request') + self.client.set_snaplock_attributes(fake.SHARE_NAME, **options) + self.client.send_request.assert_has_calls([ + mock.call('volume-set-snaplock-attrs', api_args)]) + + def test_set_snaplock_attributes_all_none(self): + self.mock_object(self.client, 'send_request') + options = {'snaplock_autocommit_period': None, + 'snaplock_min_retention_period': None, + 'snaplock_max_retention_period': None, + 'snaplock_default_retention_period': None, + } + self.client.set_snaplock_attributes(fake.SHARE_NAME, **options) + self.client.send_request.assert_not_called() + + def test_get_vserver_aggr_snaplock_type(self): + self.client.features.SNAPLOCK = True + api_response = netapp_api.NaElement( + fake.VSERVER_SHOW_AGGR_GET_RESPONSE, + ) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + result = self.client.get_vserver_aggr_snaplock_type( + fake.SHARE_AGGREGATE_NAMES + ) + self.assertEqual("compliance", result) + + def test_get_vserver_aggr_snaplock_type_negative(self): + 
self.client.features.SNAPLOCK = False + api_response = netapp_api.NaElement( + fake.VSERVER_SHOW_AGGR_GET_RESPONSE, + ) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + result = self.client.get_vserver_aggr_snaplock_type( + fake.SHARE_AGGREGATE_NAMES + ) + self.assertIsNone(result) + + @ddt.data("compliance", "enterprise") + def test__is_snaplock_enabled_volume_true(self, snaplock_type): + vol_attr = {'snaplock-type': snaplock_type} + self.mock_object(self.client, + 'get_volume', + mock.Mock(return_value=vol_attr)) + result = self.client._is_snaplock_enabled_volume( + fake.SHARE_AGGREGATE_NAMES + ) + self.assertIs(True, result) + + def test__is_snaplock_enabled_volume_false(self): + vol_attr = {'snaplock-type': 'non-snaplock'} + self.mock_object(self.client, + 'get_volume', + mock.Mock(return_value=vol_attr)) + result = self.client._is_snaplock_enabled_volume( + fake.SHARE_AGGREGATE_NAMES + ) + self.assertIs(False, result) + + def test_get_storage_failover_partner(self): + api_response = netapp_api.NaElement(fake.STORAGE_FAIL_OVER_PARTNER) + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=api_response)) + + result = self.client.get_storage_failover_partner("fake_node") + self.assertEqual("fake_partner_node", result) + + def test_get_migratable_data_lif_for_node(self): + api_response = netapp_api.NaElement( + fake.NET_INTERFACE_GET_ITER_RESPONSE) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + failover_policy = ['system-defined', 'sfo-partner-only'] + protocols = ['nfs', 'cifs'] + api_args = { + 'query': { + 'net-interface-info': { + 'failover-policy': '|'.join(failover_policy), + 'home-node': "fake_node", + 'data-protocols': { + 'data-protocol': '|'.join(protocols), + } + } + } + } + result = self.client.get_migratable_data_lif_for_node("fake_node") + self.client.send_iter_request.assert_has_calls([ + mock.call('net-interface-get-iter', 
api_args)]) + self.assertEqual(list(fake.LIF_NAMES), result) + + def test_get_data_lif_details_for_nodes(self): + api_response = netapp_api.NaElement( + fake.DATA_LIF_CAPACITY_DETAILS) + self.mock_object(self.client, + 'send_iter_request', + mock.Mock(return_value=api_response)) + api_args = { + 'desired-attributes': { + 'data-lif-capacity-details-info': { + 'limit-for-node': None, + 'count-for-node': None, + 'node': None + }, + }, + } + expected_result = [{'limit-for-node': '512', + 'count-for-node': '44', + 'node': 'fake_node', + }] + result = self.client.get_data_lif_details_for_nodes() + self.client.send_iter_request.assert_has_calls([ + mock.call('data-lif-capacity-details', api_args)]) + self.assertEqual(expected_result, result) diff --git a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py index 4de44a45d8..5ff4ce2196 100644 --- a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py +++ b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode_rest.py @@ -28,6 +28,7 @@ from manila.share.drivers.netapp import utils as netapp_utils from manila import test from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake +from manila import utils @ddt.ddt @@ -573,7 +574,7 @@ def test_get_aggregate(self): result = self.client.get_aggregate(fake.SHARE_AGGREGATE_NAME) fields = ('name,block_storage.primary.raid_type,' - 'block_storage.storage_type') + 'block_storage.storage_type,snaplock_type') self.client._get_aggregates.assert_has_calls([ mock.call( @@ -585,6 +586,8 @@ def test_get_aggregate(self): 'raid-type': response[0]['block_storage']['primary']['raid_type'], 'is-hybrid': response[0]['block_storage']['storage_type'] == 'hybrid', + 'snaplock-type': response[0]['snaplock_type'], + 'is-snaplock': response[0]['is_snaplock'] } self.assertEqual(expected, result) @@ -949,7 +952,8 @@ def test_get_volume(self): 
'qos-policy-group-name': fake_volume.get('qos', {}) .get('policy', {}) .get('name'), - 'style-extended': fake_volume.get('style', '') + 'style-extended': fake_volume.get('style', ''), + 'snaplock-type': fake_volume.get('snaplock', {}).get('type', '') } self.mock_object(self.client, 'send_request', @@ -1036,17 +1040,23 @@ def test_create_volume(self): mock_update = self.mock_object( self.client, 'update_volume_efficiency_attributes') mock_max_files = self.mock_object(self.client, 'set_volume_max_files') + options = {'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} self.client.create_volume(fake.SHARE_AGGREGATE_NAME, fake.VOLUME_NAMES[0], fake.SHARE_SIZE, - max_files=1) - + max_files=1, snaplock_type="enterprise", + **options) mock_create_volume_async.assert_called_once_with( [fake.SHARE_AGGREGATE_NAME], fake.VOLUME_NAMES[0], fake.SHARE_SIZE, is_flexgroup=False, thin_provisioned=False, snapshot_policy=None, language=None, max_files=1, snapshot_reserve=None, volume_type='rw', qos_policy_group=None, encrypt=False, - adaptive_qos_policy_group=None, mount_point_name=None) - mock_update.assert_called_once_with(fake.VOLUME_NAMES[0], False, False) + adaptive_qos_policy_group=None, mount_point_name=None, + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME, + snaplock_type="enterprise", + ) + mock_update.assert_called_once_with( + fake.VOLUME_NAMES[0], False, False, + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME) mock_max_files.assert_called_once_with(fake.VOLUME_NAMES[0], 1) def test_create_volume_async(self): @@ -1076,7 +1086,7 @@ def test_create_volume_async(self): self.client._get_create_volume_body.assert_called_once_with( fake.VOLUME_NAMES[0], False, None, None, None, 'rw', None, False, - None, None) + None, None, None) self.client.send_request.assert_called_once_with( '/storage/volumes', 'post', body=body, wait_on_accepted=True) self.assertEqual(expected_result, result) @@ -1187,6 +1197,46 @@ def test_disable_compression_async(self): 
self.client._get_volume_by_args.assert_called_once_with( vol_name=fake.VOLUME_NAMES[0]) + def test_apply_volume_efficiency_policy(self): + volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST + uuid = volume["uuid"] + return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST + + self.mock_object(self.client, '_get_volume_by_args', + mock.Mock(return_value=volume)) + self.mock_object(self.client, 'send_request', + mock.Mock(return_value=return_value)) + + body = { + 'efficiency': {'policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} + } + + self.client.apply_volume_efficiency_policy( + fake.VOLUME_NAMES[0], + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME + ) + + self.client.send_request.assert_called_once_with( + f'/storage/volumes/{uuid}', 'patch', body=body) + self.client._get_volume_by_args.assert_called_once_with( + vol_name=fake.VOLUME_NAMES[0]) + + def test_apply_volume_efficiency_none_policy(self): + volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST + return_value = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST + + self.mock_object(self.client, '_get_volume_by_args', + mock.Mock(return_value=volume)) + self.mock_object(self.client, 'send_request', + mock.Mock(return_value=return_value)) + + self.client.apply_volume_efficiency_policy( + fake.VOLUME_NAMES[0], + efficiency_policy=None + ) + + self.client._get_volume_by_args.assert_not_called() + def test_set_volume_max_files(self): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST uuid = volume["uuid"] @@ -2301,7 +2351,8 @@ def test_modify_volume_no_optional_args(self, is_flexgroup): volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) - + self.mock_object(self.client, '_is_snaplock_enabled_volume', + mock.Mock(return_value=True)) aggr = fake.SHARE_AGGREGATE_NAME if is_flexgroup: aggr = list(fake.SHARE_AGGREGATE_NAMES) @@ -2313,7 +2364,9 @@ def test_modify_volume_no_optional_args(self, is_flexgroup): self.client.send_request.assert_called_once_with( 
'/storage/volumes/' + volume['uuid'], 'patch', body=body) mock_update_volume_efficiency_attributes.assert_called_once_with( - fake.SHARE_NAME, False, False, is_flexgroup=is_flexgroup) + fake.SHARE_NAME, False, False, + is_flexgroup=is_flexgroup, efficiency_policy=None + ) @ddt.data((fake.QOS_POLICY_GROUP_NAME, None), (None, fake.ADAPTIVE_QOS_POLICY_GROUP_NAME)) @@ -2328,6 +2381,9 @@ def test_modify_volume_all_optional_args(self, qos_group, volume = fake.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.mock_object(self.client, '_get_volume_by_args', mock.Mock(return_value=volume)) + options = {'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} + self.mock_object(self.client, '_is_snaplock_enabled_volume', + mock.Mock(return_value=True)) self.client.modify_volume( fake.SHARE_AGGREGATE_NAME, @@ -2341,7 +2397,9 @@ def test_modify_volume_all_optional_args(self, qos_group, qos_policy_group=qos_group, adaptive_qos_policy_group=adaptive_qos_group, autosize_attributes=fake.VOLUME_AUTOSIZE_ATTRS, - hide_snapdir=True) + hide_snapdir=True, + **options + ) qos_policy_name = qos_group or adaptive_qos_group body = { @@ -2363,7 +2421,10 @@ def test_modify_volume_all_optional_args(self, qos_group, self.client.send_request.assert_called_once_with( '/storage/volumes/' + volume['uuid'], 'patch', body=body) mock_update_volume_efficiency_attributes.assert_called_once_with( - fake.SHARE_NAME, True, False, is_flexgroup=False) + fake.SHARE_NAME, True, False, + is_flexgroup=False, + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME + ) def test__parse_timestamp(self): test_time_str = '2022-11-25T14:41:20+00:00' @@ -2718,13 +2779,14 @@ def test__get_create_volume_body(self, thin_provisioned): expected = { 'type': 'fake_type', 'guarantee.type': ('none' if thin_provisioned else 'volume'), - 'nas.path': '/%s' % fake.VOLUME_NAMES[0], + 'nas.path': '/%s' % fake.SHARE_MOUNT_POINT, 'snapshot_policy.name': fake.SNAPSHOT_POLICY_NAME, 'language': 'fake_language', 'space.snapshot.reserve_percent': 
'fake_percent', 'qos.policy.name': fake.QOS_POLICY_GROUP_NAME, 'svm.name': 'fake_vserver', - 'encryption.enabled': 'true' + 'encryption.enabled': 'true', + 'snaplock.type': 'compliance', } self.mock_object(self.client.connection, 'get_vserver', @@ -2737,7 +2799,9 @@ def test__get_create_volume_body(self, thin_provisioned): 'fake_type', fake.QOS_POLICY_GROUP_NAME, True, - fake.QOS_POLICY_GROUP_NAME) + fake.QOS_POLICY_GROUP_NAME, + fake.SHARE_MOUNT_POINT, + "compliance") self.assertEqual(expected, res) def test_get_job_state(self): @@ -2766,6 +2830,23 @@ def test_get_job_state_not_found(self): self.client.get_job_state, 'fake_uuid') + def test_update_volume_snapshot_policy(self): + return_uuid = { + 'uuid': 'fake_uuid' + } + mock_get_vol = self.mock_object(self.client, '_get_volume_by_args', + mock.Mock(return_value=return_uuid)) + mock_sr = self.mock_object(self.client, 'send_request') + + self.client.update_volume_snapshot_policy('fake_volume_name', + fake.SNAPSHOT_POLICY_NAME) + body = { + 'snapshot_policy.name': fake.SNAPSHOT_POLICY_NAME + } + mock_sr.assert_called_once_with('/storage/volumes/fake_uuid', + 'patch', body=body) + mock_get_vol.assert_called_once_with(vol_name='fake_volume_name') + @ddt.data(True, False) def test_update_volume_efficiency_attributes(self, status): response = { @@ -2778,16 +2859,29 @@ def test_update_volume_efficiency_attributes(self, status): dis_dedupe = self.mock_object(self.client, 'disable_dedupe_async') en_comp = self.mock_object(self.client, 'enable_compression_async') dis_comp = self.mock_object(self.client, 'disable_compression_async') + apply_efficiency_policy = self.mock_object( + self.client, 'apply_volume_efficiency_policy' + ) - self.client.update_volume_efficiency_attributes(fake.VOLUME_NAMES[0], - status, status) + self.client.update_volume_efficiency_attributes( + fake.VOLUME_NAMES[0], + status, status, + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME) if status: 
en_dedupe.assert_called_once_with(fake.VOLUME_NAMES[0]) en_comp.assert_called_once_with(fake.VOLUME_NAMES[0]) + apply_efficiency_policy.assert_called_once_with( + fake.VOLUME_NAMES[0], + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME + ) else: dis_dedupe.assert_called_once_with(fake.VOLUME_NAMES[0]) dis_comp.assert_called_once_with(fake.VOLUME_NAMES[0]) + apply_efficiency_policy.assert_called_once_with( + fake.VOLUME_NAMES[0], + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME + ) def test_trigger_volume_move_cutover(self): query = { @@ -4499,6 +4593,31 @@ def test_delete_vserver_peer(self): self.client.send_request.assert_called_once_with( '/svm/peers/fake_uuid', 'delete', enable_tunneling=False) + def test_update_showmount(self): + query = { + 'name': fake.VSERVER_NAME, + 'fields': 'uuid' + } + response_svm = fake.SVMS_LIST_SIMPLE_RESPONSE_REST + self.client.vserver = fake.VSERVER_NAME + self.mock_object(self.client, + 'send_request', + mock.Mock(side_effect=[response_svm, None])) + + fake_showmount = 'true' + self.client.update_showmount(fake_showmount) + + svm_id = response_svm.get('records')[0]['uuid'] + + body = { + 'showmount_enabled': fake_showmount, + } + self.client.send_request.assert_has_calls([ + mock.call('/svm/svms', 'get', query=query), + mock.call(f'/protocols/nfs/services/{svm_id}', + 'patch', body=body) + ]) + @ddt.data({'tcp-max-xfer-size': 10000}, {}, None) def test_enable_nfs(self, nfs_config): self.mock_object(self.client, '_get_unique_svm_by_name', @@ -6501,7 +6620,8 @@ def test_get_volume_no_aggregate(self): 'qos-policy-group-name': fake_volume.get('qos', {}) .get('policy', {}) .get('name', ''), - 'style-extended': fake_volume.get('style', '') + 'style-extended': fake_volume.get('style', ''), + 'snaplock-type': fake_volume.get('snaplock', {}).get('type', '') } result = self.client.get_volume(fake.VOLUME_NAMES[0]) self.assertEqual(expected, result) @@ -6976,3 +7096,174 @@ def test_create_snapmirror_policy_rest(self, policy_type, 
body["retention"] = [{"label": 'backup', "count": 30}] self.client.send_request.assert_called_once_with( '/snapmirror/policies/', 'post', body=body) + + def test_is_snaplock_compliance_clock_configured(self): + self.mock_object(self.client, + '_get_cluster_node_uuid', + mock.Mock(return_value="uuid")) + api_response = {'time': 'Thu Aug 08 00:51:30 EDT 2024 -04:00'} + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=api_response)) + result = self.client.is_snaplock_compliance_clock_configured( + "test_node" + ) + self.assertIs(True, result) + + def test_is_snaplock_compliance_clock_configured_negative(self): + self.mock_object(self.client, + '_get_cluster_node_uuid', + mock.Mock(return_value="uuid")) + api_response = {'time': "not configured"} + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=api_response)) + result = self.client.is_snaplock_compliance_clock_configured( + "test_node" + ) + self.assertIs(False, result) + + @ddt.data({'options': {'snaplock_autocommit_period': "4hours", + 'snaplock_min_retention_period': "6days", + 'snaplock_max_retention_period': "8months", + 'snaplock_default_retention_period': "8days"}, + }, + {'options': {'snaplock_autocommit_period': "4hours", + 'snaplock_min_retention_period': "6days", + 'snaplock_max_retention_period': "8months", + 'snaplock_default_retention_period': "min"}, + }, + {'options': {'snaplock_autocommit_period': "4hours", + 'snaplock_min_retention_period': "6days", + 'snaplock_max_retention_period': "8months", + 'snaplock_default_retention_period': "max"}, + }, + ) + @ddt.unpack + def test_set_snaplock_attributes(self, options): + self.mock_object(self.client, 'send_request') + + body = { + 'snaplock.autocommit_period': + utils.convert_time_duration_to_iso_format( + options.get('snaplock_autocommit_period')), + 'snaplock.retention.minimum': + utils.convert_time_duration_to_iso_format( + options.get('snaplock_min_retention_period')), + 'snaplock.retention.maximum': + 
utils.convert_time_duration_to_iso_format( + options.get('snaplock_max_retention_period')), + } + if options.get('snaplock_default_retention_period') == "min": + body['snaplock.retention.default'] = ( + utils.convert_time_duration_to_iso_format( + options.get('snaplock_min_retention_period')) + ) + elif options.get('snaplock_default_retention_period') == 'max': + body['snaplock.retention.default'] = ( + utils.convert_time_duration_to_iso_format( + options.get('snaplock_max_retention_period')) + ) + else: + body['snaplock.retention.default'] = ( + utils.convert_time_duration_to_iso_format( + options.get('snaplock_default_retention_period')) + ) + + self.mock_object(self.client, + '_get_volume_by_args', + mock.Mock(return_value={'uuid': fake.FAKE_UUID})) + self.client.set_snaplock_attributes(fake.SHARE_NAME, **options) + + vol_uid = fake.FAKE_UUID + self.client.send_request.assert_called_once_with( + f'/storage/volumes/{vol_uid}', 'patch', body=body) + + def test_set_snaplock_attributes_none(self): + self.mock_object(self.client, 'send_request') + self.mock_object(self.client, + '_get_volume_by_args', + mock.Mock(return_value={'uuid': fake.FAKE_UUID})) + options = {'snaplock_autocommit_period': None, + 'snaplock_min_retention_period': None, + 'snaplock_max_retention_period': None, + 'snaplock_default_retention_period': None, + } + self.client.set_snaplock_attributes(fake.SHARE_NAME, **options) + self.client.send_request.assert_not_called() + + def test__get_cluster_node_uuid(self): + response = {'records': [{'uuid': fake.FAKE_UUID}]} + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=response)) + + result = self.client._get_cluster_node_uuid("fake_node") + self.assertEqual(result, fake.FAKE_UUID) + + @ddt.data("compliance", "enterprise") + def test__is_snaplock_enabled_volume_true(self, snaplock_type): + vol_attr = {'snaplock-type': snaplock_type} + self.mock_object(self.client, + 'get_volume', + mock.Mock(return_value=vol_attr)) + result = 
self.client._is_snaplock_enabled_volume( + fake.SHARE_AGGREGATE_NAMES + ) + self.assertIs(True, result) + + def test__is_snaplock_enabled_volume_false(self): + vol_attr = {'snaplock-type': "non-snaplock"} + self.mock_object(self.client, + 'get_volume', + mock.Mock(return_value=vol_attr)) + result = self.client._is_snaplock_enabled_volume( + fake.SHARE_AGGREGATE_NAMES + ) + self.assertIs(False, result) + + def test_get_storage_failover_partner(self): + self.mock_object(self.client, + '_get_cluster_node_uuid', + mock.Mock(return_value=fake.FAKE_UUID)) + response = {'ha': {'partners': [{'name': 'partner_node'}]}} + self.mock_object(self.client, + 'send_request', + mock.Mock(return_value=response)) + result = self.client.get_storage_failover_partner("fake_node") + self.assertEqual(result, "partner_node") + + def test_get_migratable_data_lif_for_node(self): + api_response = fake.GENERIC_NETWORK_INTERFACES_GET_REPONSE + expected_result = [fake.LIF_NAME] + + self.mock_object(self.client, '_has_records', + mock.Mock(return_value=True)) + self.mock_object( + self.client, + 'send_request', + mock.Mock(side_effect=self._send_request_side_effect) + ) + uuid = api_response['records'][0]['uuid'] + result = self.client.get_migratable_data_lif_for_node("fake_node") + self.client.send_request.assert_any_call( + '/network/ip/interfaces', 'get', + query={ + 'services': 'data_nfs|data_cifs', + 'location.home_node.name': 'fake_node', + 'fields': 'name', + } + ) + self.client.send_request.assert_any_call( + f'/network/ip/interfaces/{uuid}', 'get' + ) + self.assertEqual(expected_result, result) + + def _send_request_side_effect(self, endpoint, method, query=None): + if (endpoint == '/network/ip/interfaces' and method == 'get' + and query is not None): + return {"records": [{"uuid": "fake_uuid", "name": fake.LIF_NAME}]} + elif (endpoint.startswith('/network/ip/interfaces/') + and method == 'get'): + return {'location': {'failover': 'sfo_partners_only'}} + return {} diff --git 
a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py index 28bfc8218a..2047948673 100644 --- a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py +++ b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py @@ -156,6 +156,9 @@ def test_do_setup(self): self.mock_object( self.library._client, 'get_nfs_config_default', mock.Mock(return_value=fake.NFS_CONFIG_DEFAULT)) + self.mock_object(self.library._client, + 'list_cluster_nodes', + mock.Mock(return_value=['node1', 'node2'])) self.mock_object( self.library, '_check_snaprestore_license', mock.Mock(return_value=True)) @@ -163,10 +166,9 @@ def test_do_setup(self): self.library, '_get_licenses', mock.Mock(return_value=fake.LICENSES)) - mock_get_api_client.features.TRANSFER_LIMIT_NFS_CONFIG = True + mock_get_api_client.features.TRANSFER_LIMIT_NFS_CONFIG = True self.library.do_setup(self.context) - self.assertEqual(fake.LICENSES, self.library._licenses) mock_get_api_client.assert_called_once_with() (self.library._client.check_for_cluster_credentials. 
@@ -178,6 +180,9 @@ def test_do_setup(self): self.mock_object(self.library._client, 'check_for_cluster_credentials', mock.Mock(return_value=True)) + self.mock_object(self.library._client, + 'list_cluster_nodes', + mock.Mock(return_value=['node1', 'node2'])) self.mock_object( self.library, '_check_snaprestore_license', mock.Mock(return_value=True)) @@ -543,11 +548,9 @@ def test_get_pools(self): self.library._cache_pool_status = na_utils.DataCache(60) self.library._have_cluster_creds = True - result = self.library._get_pools( get_filter_function=fake.fake_get_filter_function, goodness_function='goodness') - self.assertListEqual(fake_pool, result) mock_find_aggr.assert_called_once_with() mock_get_flexgroup_aggr.assert_called_once_with() @@ -574,7 +577,9 @@ def test_get_pool_vserver_creds(self): self.library._ssc_stats = fake.SSC_INFO_VSERVER_CREDS self.library._perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=50.0)) - + self.mock_object(self.library, + '_get_aggregate_snaplock_type', + mock.Mock(return_value="compliance")) result = self.library._get_pool( fake_pool['pool_name'], fake_pool['total_capacity_gb'], fake_pool['free_capacity_gb'], fake_pool['allocated_capacity_gb']) @@ -593,6 +598,9 @@ def test_get_pool_cluster_creds(self): self.library._ssc_stats = fake.SSC_INFO self.library._perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=30.0)) + self.mock_object(self.library, + '_get_aggregate_snaplock_type', + mock.Mock(return_value="compliance")) result = self.library._get_pool( fake_pool['pool_name'], fake_pool['total_capacity_gb'], @@ -604,6 +612,9 @@ def test_get_flexvol_pool_space(self): total_gb, free_gb, used_gb = self.library._get_flexvol_pool_space( fake.AGGREGATE_CAPACITIES, fake.AGGREGATES[0]) + self.mock_object(self.library, + '_get_aggregate_snaplock_type', + mock.Mock(return_value="compliance")) self.assertEqual(total_gb, fake.POOLS[0]['total_capacity_gb']) self.assertEqual(free_gb, 
fake.POOLS[0]['free_capacity_gb']) @@ -1506,6 +1517,7 @@ def test_allocate_container(self, hide_snapdir, create_fpolicy, is_fg): provisioning_options = copy.deepcopy( fake.PROVISIONING_OPTIONS_WITH_FPOLICY) provisioning_options['hide_snapdir'] = hide_snapdir + provisioning_options['snaplock_type'] = "compliance" self.mock_object(self.library, '_get_backend_share_name', mock.Mock( return_value=fake.SHARE_NAME)) self.mock_object(share_utils, 'extract_host', mock.Mock( @@ -1624,21 +1636,25 @@ def test_create_flexgroup_share(self, max_files): mock_wait_for_flexgroup_deployment = self.mock_object( self.library, 'wait_for_flexgroup_deployment') aggr_list = [fake.AGGREGATE] - + options = {'efficiency_policy': fake.VOLUME_EFFICIENCY_POLICY_NAME} self.library._create_flexgroup_share(vserver_client, aggr_list, fake.SHARE_NAME, 100, 10, - max_files=max_files) - + max_files=max_files, + snaplock_type="compliance", + **options) start_timeout = (self.library.configuration. netapp_flexgroup_aggregate_not_busy_timeout) mock_wait_for_start.assert_called_once_with( start_timeout, vserver_client, aggr_list, fake.SHARE_NAME, 100, - 10, None) + 10, None, "compliance", + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME) mock_wait_for_flexgroup_deployment.assert_called_once_with( vserver_client, fake.JOB_ID, 2) - (vserver_client.update_volume_efficiency_attributes. 
- assert_called_once_with(fake.SHARE_NAME, False, False, - is_flexgroup=True)) + vserver_client.update_volume_efficiency_attributes.assert_called_once_with( # noqa + fake.SHARE_NAME, False, False, + is_flexgroup=True, + efficiency_policy=fake.VOLUME_EFFICIENCY_POLICY_NAME + ) if max_files: vserver_client.set_volume_max_files.assert_called_once_with( fake.SHARE_NAME, max_files) @@ -1665,14 +1681,16 @@ def test_wait_for_start_create_flexgroup(self): aggr_list = [fake.AGGREGATE] result = self.library.wait_for_start_create_flexgroup( - 20, vserver_client, aggr_list, fake.SHARE_NAME, 1, 10) + 20, vserver_client, aggr_list, fake.SHARE_NAME, 1, 10, + fake.MOUNT_POINT_NAME, "compliance") self.assertEqual(job, result) vserver_client.create_volume_async.assert_called_once_with( aggr_list, fake.SHARE_NAME, 1, is_flexgroup=True, snapshot_reserve=10, auto_provisioned=self.library._is_flexgroup_auto, - mount_point_name=None) + mount_point_name=fake.MOUNT_POINT_NAME, + snaplock_type="compliance") def test_wait_for_start_create_flexgroup_timeout(self): vserver_client = mock.Mock() @@ -1684,7 +1702,8 @@ def test_wait_for_start_create_flexgroup_timeout(self): self.assertRaises( exception.NetAppException, self.library.wait_for_start_create_flexgroup, 10, - vserver_client, aggr_list, fake.SHARE_NAME, 1, 10) + vserver_client, aggr_list, fake.SHARE_NAME, 1, 10, + fake.MOUNT_POINT_NAME, "compliance") def test_wait_for_flexgroup_deployment(self): vserver_client = mock.Mock() @@ -1876,6 +1895,12 @@ def test_get_provisioning_options_implicit_false(self): 'fpolicy_extensions_to_exclude': None, 'fpolicy_extensions_to_include': None, 'fpolicy_file_operations': None, + 'efficiency_policy': None, + 'snaplock_type': None, + 'snaplock_autocommit_period': None, + 'snaplock_min_retention_period': None, + 'snaplock_max_retention_period': None, + 'snaplock_default_retention_period': None, } self.assertEqual(expected, result) @@ -2016,6 +2041,87 @@ def 
test__check_fpolicy_file_operations_invalid_operation(self): fake.SHARE, invalid_ops) + @ddt.data('15minutes', '4hours', "8days", "5months", "2years") + def test__check_snaplock_attributes_autocommit_period(self, duration): + result = self.library._check_snaplock_attributes( + fake.SHARE, "netapp:snaplock_autocommit_period", duration) + self.assertIsNone(result) + + @ddt.data('15minutes', '4hours', "8days", "5months", "2years") + def test__check_snaplock_attributes_min_retention_period(self, duration): + result = self.library._check_snaplock_attributes( + fake.SHARE, "netapp:snaplock_min_retention_period", duration) + self.assertIsNone(result) + + @ddt.data('15minutes', '4hours', "8days", "5months", "2years", + "infinite") + def test__check_snaplock_attributes_max_retention_period(self, duration): + result = self.library._check_snaplock_attributes( + fake.SHARE, "netapp:snaplock_max_retention_period", duration) + self.assertIsNone(result) + + @ddt.data('15minutes', '4hours', "8days", "5months", "2years", + "infinite", "min", "max") + def test__check_snaplock_attributes_default_retention_period(self, + duration): + result = self.library._check_snaplock_attributes( + fake.SHARE, "netapp:snaplock_default_retention_period", duration) + self.assertIsNone(result) + + def test__check_snaplock_attributes_autocommit_period_negative(self): + self.assertRaises(exception.NetAppException, + self.library._check_snaplock_attributes, + fake.SHARE, + "netapp:snaplock_autocommit_period", + "invalid_period", + ) + + def test__check_snaplock_attributes_min_retention_period_negative(self): + self.assertRaises(exception.NetAppException, + self.library._check_snaplock_attributes, + fake.SHARE, + "netapp:snaplock_min_retention_period", + "invalid_period", + ) + + def test__check_snaplock_attributes_max_retention_period_negative(self): + self.assertRaises(exception.NetAppException, + self.library._check_snaplock_attributes, + fake.SHARE, + "netapp:snaplock_max_retention_period", + 
"invalid_period", + ) + + def test__check_snaplock_attributes_default_retention_period_neg(self): + self.assertRaises(exception.NetAppException, + self.library._check_snaplock_attributes, + fake.SHARE, + "netapp:snaplock_default_retention_period", + "invalid_period", + ) + + def test__check_snaplock_compatibility_true(self): + self.library._have_cluster_creds = True + self.library._is_snaplock_compliance_configured = True + self.mock_object(self.client, + 'list_cluster_nodes', + mock.Mock(return_value=(["node1", "node2"]))) + result = self.library._check_snaplock_compatibility() + self.assertIsNone(result) + + def test__check_snaplock_compatibility_false(self): + self.library._have_cluster_creds = True + self.library._is_snaplock_compliance_configured = False + self.mock_object(self.client, + 'list_cluster_nodes', + mock.Mock(return_value=(["node1", "node2"]))) + self.assertRaises(exception.NetAppException, + self.library._check_snaplock_compatibility) + + def test__check_snaplock_compatibility_not_cluster_scope(self): + self.library._have_cluster_creds = False + self.library._check_snaplock_compatibility() + def test_allocate_container_no_pool(self): vserver_client = mock.Mock() @@ -3556,6 +3662,7 @@ def test_update_access(self): [fake.SHARE_ACCESS], [], [], + [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( @@ -3589,6 +3696,7 @@ def test_update_access_no_share_server(self, get_vserver_exception): [fake.SHARE_ACCESS], [], [], + [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( @@ -3622,6 +3730,7 @@ def test_update_access_share_not_found(self): [fake.SHARE_ACCESS], [], [], + [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( @@ -3655,6 +3764,7 @@ def test_update_access_to_active_replica(self): [fake.SHARE_ACCESS], [], [], + [], share_server=fake.SHARE_SERVER) mock_get_vserver.assert_called_once_with( @@ -3690,6 +3800,7 @@ def test_update_access_to_in_sync_replica(self, is_readable): 
[fake.SHARE_ACCESS], [], [], + [], share_server=fake.SHARE_SERVER) if is_readable: @@ -3735,6 +3846,7 @@ def test_update_ssc_info(self): 'netapp_raid_type': 'raid4', 'netapp_disk_type': ['FCAL'], 'netapp_hybrid_aggregate': 'false', + 'netapp_snaplock_type': ['compliance', 'enterprise'], }, fake.AGGREGATES[1]: { 'netapp_aggregate': fake.AGGREGATES[1], @@ -3742,6 +3854,7 @@ def test_update_ssc_info(self): 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SATA', 'SSD'], 'netapp_hybrid_aggregate': 'true', + 'netapp_snaplock_type': ['compliance', 'enterprise'], }, fake.FLEXGROUP_POOL_NAME: { 'netapp_aggregate': fake.FLEXGROUP_POOL['netapp_aggregate'], @@ -3749,11 +3862,58 @@ def test_update_ssc_info(self): 'netapp_raid_type': 'raid4 raid_dp', 'netapp_disk_type': ['FCAL', 'SATA', 'SSD'], 'netapp_hybrid_aggregate': 'false true', + 'netapp_snaplock_type': fake.FLEXGROUP_POOL[ + 'netapp_snaplock_type'] }, } self.assertEqual(expected, self.library._ssc_stats) + def test_update_ssc_info_non_unified_aggr(self): + + self.library._flexgroup_pools = fake.FLEXGROUP_POOL_OPT + self.library._client.features.UNIFIED_AGGR = False + self.library._have_cluster_creds = True + self.mock_object(self.library, + '_find_matching_aggregates', + mock.Mock(return_value=fake.AGGREGATES)) + self.mock_object(self.library, + '_get_flexgroup_aggr_set', + mock.Mock(return_value=fake.FLEXGROUP_AGGR_SET)) + self.mock_object(self.library, + '_get_aggregate_info', + mock.Mock(return_value=fake.SSC_INFO_MAP)) + + self.library._update_ssc_info() + + expected = { + fake.AGGREGATES[0]: { + 'netapp_aggregate': fake.AGGREGATES[0], + 'netapp_flexgroup': False, + 'netapp_raid_type': 'raid4', + 'netapp_disk_type': ['FCAL'], + 'netapp_hybrid_aggregate': 'false', + 'netapp_snaplock_type': 'compliance', + }, + fake.AGGREGATES[1]: { + 'netapp_aggregate': fake.AGGREGATES[1], + 'netapp_flexgroup': False, + 'netapp_raid_type': 'raid_dp', + 'netapp_disk_type': ['SATA', 'SSD'], + 'netapp_hybrid_aggregate': 'true', + 
'netapp_snaplock_type': 'enterprise', + }, + fake.FLEXGROUP_POOL_NAME: { + 'netapp_aggregate': fake.FLEXGROUP_POOL['netapp_aggregate'], + 'netapp_flexgroup': True, + 'netapp_raid_type': 'raid4 raid_dp', + 'netapp_disk_type': ['FCAL', 'SATA', 'SSD'], + 'netapp_hybrid_aggregate': 'false true', + 'netapp_snaplock_type': 'compliance enterprise', + }, + } + self.assertEqual(expected, self.library._ssc_stats) + def test_update_ssc_info_no_aggregates(self): self.library._flexgroup_pools = {} @@ -3778,6 +3938,9 @@ def test_update_ssc_info_no_cluster_creds(self): self.mock_object(self.library, '_get_aggregate_info', mock.Mock(return_value=fake.SSC_INFO_MAP)) + self.mock_object(self.client, + 'get_vserver_aggr_snaplock_type', + mock.Mock(return_value='compliance')) self.library._update_ssc_info() @@ -3785,10 +3948,12 @@ def test_update_ssc_info_no_cluster_creds(self): fake.AGGREGATES[0]: { 'netapp_aggregate': fake.AGGREGATES[0], 'netapp_flexgroup': False, + 'netapp_snaplock_type': ['compliance', 'enterprise'], }, fake.AGGREGATES[1]: { 'netapp_aggregate': fake.AGGREGATES[1], 'netapp_flexgroup': False, + 'netapp_snaplock_type': ['compliance', 'enterprise'], }, fake.FLEXGROUP_POOL_NAME: { 'netapp_aggregate': fake.FLEXGROUP_POOL['netapp_aggregate'], @@ -3814,12 +3979,14 @@ def test_get_aggregate_info(self): 'netapp_disk_type': 'FCAL', 'netapp_hybrid_aggregate': 'false', 'netapp_is_home': False, + 'netapp_snaplock_type': 'compliance', }, fake.AGGREGATES[1]: { 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SATA', 'SSD'], 'netapp_hybrid_aggregate': 'true', 'netapp_is_home': True, + 'netapp_snaplock_type': 'enterprise', }, } @@ -6472,6 +6639,41 @@ def test_migration_check_compatibility_destination_type_is_encrypted(self): [mock.call(share_server=fake.SHARE_SERVER), mock.call(share_server='dst_srv')]) + def test_migration_check_compatibility_snaplock_not_compatible(self): + self.library._have_cluster_creds = True + self.mock_object(self.library, '_get_backend_share_name', + 
mock.Mock(return_value=fake.SHARE_NAME)) + self.mock_object(data_motion, 'get_backend_configuration') + self.mock_object(self.library, '_get_vserver', + mock.Mock(return_value=(fake.VSERVER1, mock.Mock()))) + self.mock_object(share_utils, 'extract_host', mock.Mock( + side_effect=[ + 'destination_backend', 'destination_pool', 'source_pool'])) + mock_dm = mock.Mock() + self.mock_object(data_motion, 'DataMotionSession', + mock.Mock(return_value=mock_dm)) + self.mock_object(self.library, 'is_flexgroup_destination_host', + mock.Mock(return_value=False)) + self.mock_object(self.library, '_is_flexgroup_pool', + mock.Mock(return_value=False)) + self.mock_object(self.library, + '_is_snaplock_compatible_for_migration', + mock.Mock(return_value=False) + ) + migration_compatibility = self.library.migration_check_compatibility( + self.context, fake_share.fake_share_instance(), + fake_share.fake_share_instance(), share_server=fake.SHARE_SERVER, + destination_share_server='dst_srv') + + expected_compatibility = { + 'compatible': False, + 'writable': False, + 'nondisruptive': False, + 'preserve_metadata': False, + 'preserve_snapshots': False, + } + self.assertDictEqual(expected_compatibility, migration_compatibility) + def test_migration_start(self): mock_info_log = self.mock_object(lib_base.LOG, 'info') source_snapshots = mock.Mock() @@ -8904,3 +9106,81 @@ def _backup_mock_common_method_for_negative(self, self.mock_object(mock_des_client, 'list_volume_snapshots', mock.Mock(return_value=snap_list)) + + def test_update_share_from_metadata(self): + metadata = { + "snapshot_policy": "daily", + } + + share_instance = fake.SHARE_INSTANCE + mock_update_volume_snapshot_policy = self.mock_object( + self.library, 'update_volume_snapshot_policy') + + self.library.update_share_from_metadata(self.context, share_instance, + metadata) + + mock_update_volume_snapshot_policy.assert_called_once_with( + share_instance, "daily", share_server=None) + + def 
test_update_share_network_subnet_from_metadata(self): + metadata = { + "showmount": "true", + } + + mock_update_showmount = self.mock_object( + self.library, 'update_showmount') + + self.library.update_share_network_subnet_from_metadata( + self.context, + 'fake_share_network', + 'fake_share_network_subnet', + fake.SHARE_SERVER, + metadata) + + mock_update_showmount.assert_called_once_with( + "true", share_server=fake.SHARE_SERVER) + + def test__get_aggregate_snaplock_type_cluster_scope(self): + self.library._have_cluster_creds = True + self.mock_object(self.client, + 'get_aggregate', + mock.Mock(return_value={ + 'snaplock-type': 'compliance' + })) + result = self.library._get_aggregate_snaplock_type(fake.AGGREGATE) + self.assertEqual(result, "compliance") + + def test__get_aggregate_snaplock_type_vserver_scope(self): + self.library._have_cluster_creds = False + self.mock_object(self.client, + 'get_vserver_aggr_snaplock_type', + mock.Mock(return_value='enterprise')) + result = self.library._get_aggregate_snaplock_type(fake.AGGREGATE) + self.assertEqual(result, "enterprise") + + def test__is_snaplock_compatible_for_migration_for_unified_aggr(self): + self.library._client.features.UNIFIED_AGGR = True + result = self.library._is_snaplock_compatible_for_migration( + fake.AGGREGATE, + fake.AGGR_POOL_NAME + ) + self.assertTrue(result) + + def test__is_snaplock_compatible_for_migration_for_non_snaplock(self): + self.library._client.features.UNIFIED_AGGR = False + self.library._client.features.SNAPLOCK = False + + result = self.library._is_snaplock_compatible_for_migration( + fake.AGGREGATE, + fake.AGGR_POOL_NAME + ) + self.assertTrue(result) + + def test__is_snaplock_compatible_for_migration_non_unified_aggr(self): + self.library._client.features.UNIFIED_AGGR = False + self.library._client.features.SNAPLOCK = True + result = self.library._is_snaplock_compatible_for_migration( + fake.AGGREGATE, + fake.AGGR_POOL_NAME + ) + self.assertTrue(result) diff --git 
a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py index 0d7c46282c..51e5462eb6 100644 --- a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py +++ b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py @@ -513,6 +513,13 @@ def test_setup_server(self, nfs_config_support, nfs_config=None): self.library, '_check_nfs_config_extra_specs_validity', mock.Mock()) + self.library.configuration.netapp_restrict_lif_creation_per_ha_pair = ( + True + ) + check_lif_limit = self.mock_object( + self.library, + '_check_data_lif_count_limit_reached_for_ha_pair', + ) mock_get_nfs_config = self.mock_object( self.library, "_get_nfs_config_provisioning_options", @@ -532,6 +539,7 @@ def test_setup_server(self, nfs_config_support, nfs_config=None): self.assertTrue(mock_validate_share_network_subnets.called) self.assertTrue(mock_get_vserver_name.called) self.assertTrue(mock_create_vserver.called) + self.assertTrue(check_lif_limit.called) if nfs_config_support: mock_get_extra_spec.assert_called_once_with( fake.SERVER_METADATA['share_type_id']) @@ -2366,6 +2374,13 @@ def test__check_compatibility_for_svm_migrate(self, expected_exception, self.fake_src_share_server['share_network_subnets'][0].get( 'neutron_subnet_id') } + self.library.configuration.netapp_restrict_lif_creation_per_ha_pair = ( + True + ) + check_lif_limit = self.mock_object( + self.library, + '_check_data_lif_count_limit_reached_for_ha_pair', + ) self.mock_object(self.library._client, 'list_cluster_nodes', mock.Mock(return_value=fake.CLUSTER_NODES)) self.mock_object(self.library, '_get_node_data_port', @@ -2404,6 +2419,7 @@ def test__check_compatibility_for_svm_migrate(self, expected_exception, 'segmentation_id'])) self.library._create_port_and_broadcast_domain.assert_called_once_with( fake.IPSPACE, network_info) + self.assertTrue(check_lif_limit.called) def 
test__check_compatibility_for_svm_migrate_check_failure(self): network_info = { @@ -4119,3 +4135,56 @@ def test__delete_backup_vserver(self): side_effect=exception.NetAppException( message=msg))) self.library._delete_backup_vserver(fake.SHARE_BACKUP, des_vserver) + + def test__check_data_lif_count_limit_reached_for_ha_pair_false(self): + nodes = ["node1", "node2"] + lif_detail = [{'node': "node1", + 'count-for-node': '44', + 'limit-for-node': '512'}, + {'node': "node2", + 'count-for-node': '50', + 'limit-for-node': '512'}] + + self.mock_object(self.client, + 'get_storage_failover_partner', + mock.Mock(return_value="node2")) + self.mock_object(self.client, + 'list_cluster_nodes', + mock.Mock(return_value=nodes)) + + self.mock_object(self.client, + 'get_data_lif_details_for_nodes', + mock.Mock(return_value=lif_detail)) + self.mock_object(self.client, + 'get_migratable_data_lif_for_node', + mock.Mock(return_value=["data_lif_1", "data_lif_2"])) + self.library._check_data_lif_count_limit_reached_for_ha_pair( + self.client) + + def test__check_data_lif_count_limit_reached_for_ha_pair_true(self): + nodes = ["node1", "node2"] + lif_detail = [{'node': "node1", + 'count-for-node': '511', + 'limit-for-node': '512'}, + {'node': "node2", + 'count-for-node': '250', + 'limit-for-node': '512'}] + self.mock_object(self.client, + 'get_storage_failover_partner', + mock.Mock(return_value="node2")) + self.mock_object(self.client, + 'list_cluster_nodes', + mock.Mock(return_value=nodes)) + + self.mock_object(self.client, + 'get_data_lif_details_for_nodes', + mock.Mock(return_value=lif_detail)) + self.mock_object(self.client, + 'get_migratable_data_lif_for_node', + mock.Mock(return_value=["data_lif_1", "data_lif_2"])) + + self.assertRaises( + exception.NetAppException, + self.library._check_data_lif_count_limit_reached_for_ha_pair, + self.client, + ) diff --git a/manila/tests/share/drivers/netapp/dataontap/fakes.py b/manila/tests/share/drivers/netapp/dataontap/fakes.py index 
ee96899518..aff9354900 100644 --- a/manila/tests/share/drivers/netapp/dataontap/fakes.py +++ b/manila/tests/share/drivers/netapp/dataontap/fakes.py @@ -115,6 +115,7 @@ FPOLICY_EXT_TO_EXCLUDE_LIST = ['jpg', 'mp3'] BACKUP_TYPE = "fake_backup_type" MOUNT_POINT_NAME = 'fake_mp' +VOLUME_EFFICIENCY_POLICY_NAME = 'fake_volume_efficiency_policy' JOB_ID = '123' JOB_STATE = 'success' @@ -263,6 +264,12 @@ 'netapp:tcp_max_xfer_size': 100, 'netapp:udp_max_xfer_size': 100, 'netapp:adaptive_qos_policy_group': None, + 'netapp:snaplock_type': "compliance", + 'netapp:snaplock_autocommit_period': '4months', + 'netapp:snaplock_min_retention_period': '30minutes', + 'netapp:snaplock_max_retention_period': '2years', + 'netapp:snaplock_default_retention_period': '2months' + } EXTRA_SPEC_WITH_REPLICATION = copy.copy(EXTRA_SPEC) @@ -408,6 +415,12 @@ 'fpolicy_extensions_to_exclude': None, 'fpolicy_extensions_to_include': None, 'fpolicy_file_operations': None, + 'efficiency_policy': None, + 'snaplock_type': None, + 'snaplock_autocommit_period': None, + 'snaplock_min_retention_period': None, + 'snaplock_max_retention_period': None, + 'snaplock_default_retention_period': None, } PROVISIONING_OPTIONS_STRING_MISSING_SPECS = { @@ -418,6 +431,12 @@ 'fpolicy_extensions_to_exclude': None, 'fpolicy_extensions_to_include': None, 'fpolicy_file_operations': None, + 'efficiency_policy': None, + 'snaplock_type': None, + 'snaplock_autocommit_period': None, + 'snaplock_min_retention_period': None, + 'snaplock_max_retention_period': None, + 'snaplock_default_retention_period': None, } PROVISIONING_OPTIONS_STRING_DEFAULT = { @@ -428,6 +447,12 @@ 'fpolicy_extensions_to_exclude': None, 'fpolicy_extensions_to_include': None, 'fpolicy_file_operations': None, + 'efficiency_policy': None, + 'snaplock_type': None, + 'snaplock_autocommit_period': None, + 'snaplock_min_retention_period': None, + 'snaplock_max_retention_period': None, + 'snaplock_default_retention_period': None, } SHORT_BOOLEAN_EXTRA_SPEC = { @@ -439,6 
+464,7 @@ 'netapp:language': 'en-US', 'netapp:max_files': 5000, 'netapp:adaptive_qos_policy_group': None, + 'netapp:efficiency_policy': None, } SHORT_STRING_EXTRA_SPEC = { @@ -965,6 +991,7 @@ 'security_service_update_support': True, 'netapp_flexgroup': True, 'netapp_cluster_name': 'fake_cluster_name', + 'netapp_snaplock_type': ['compliance', 'enterprise'], } FLEXGROUP_AGGR_SET = set(FLEXGROUP_POOL_OPT[FLEXGROUP_POOL_NAME]) @@ -985,6 +1012,7 @@ 'netapp_hybrid_aggregate': 'false', 'netapp_aggregate': AGGREGATES[0], 'netapp_flexgroup': False, + 'netapp_snaplock_type': 'compliance', }, AGGREGATES[1]: { 'netapp_raid_type': 'raid_dp', @@ -992,6 +1020,7 @@ 'netapp_hybrid_aggregate': 'true', 'netapp_aggregate': AGGREGATES[1], 'netapp_flexgroup': False, + 'netapp_snaplock_type': 'enterprise', } } @@ -1000,11 +1029,13 @@ 'netapp_raid_type': 'raid4', 'netapp_disk_type': ['FCAL'], 'netapp_hybrid_aggregate': 'false', + 'netapp_snaplock_type': 'compliance', }, AGGREGATES[1]: { 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SATA', 'SSD'], 'netapp_hybrid_aggregate': 'true', + 'netapp_snaplock_type': 'enterprise', } } @@ -1049,6 +1080,7 @@ 'share_server_multiple_subnet_support': True, 'netapp_flexgroup': False, 'netapp_cluster_name': 'fake_cluster_name', + 'netapp_snaplock_type': 'compliance', }, { 'pool_name': AGGREGATES[1], @@ -1078,6 +1110,7 @@ 'share_server_multiple_subnet_support': True, 'netapp_flexgroup': False, 'netapp_cluster_name': 'fake_cluster_name', + 'netapp_snaplock_type': 'compliance', }, ] @@ -1140,12 +1173,14 @@ 'raid-type': 'raid4', 'is-hybrid': False, 'is-home': False, + 'snaplock-type': 'compliance', }, { 'name': AGGREGATES[1], 'raid-type': 'raid_dp', 'is-hybrid': True, 'is-home': True, + 'snaplock-type': 'enterprise', }, ] diff --git a/manila/tests/share/drivers/nexenta/ns4/test_nexenta_nas.py b/manila/tests/share/drivers/nexenta/ns4/test_nexenta_nas.py index a7b3834770..33990220cf 100644 --- a/manila/tests/share/drivers/nexenta/ns4/test_nexenta_nas.py 
+++ b/manila/tests/share/drivers/nexenta/ns4/test_nexenta_nas.py @@ -442,6 +442,7 @@ def test_update_access__unsupported_access_type(self, post): share, [access], None, + None, None) @mock.patch(PATH_TO_RPC) @@ -485,7 +486,7 @@ def my_side_effect(*args, **kwargs): post.return_value = FakeResponse() post.side_effect = my_side_effect - self.drv.update_access(self.ctx, share, access_rules, None, None) + self.drv.update_access(self.ctx, share, access_rules, None, None, None) post.assert_called_with( self.request_params.url, data=self.request_params.build_post_args( @@ -498,7 +499,7 @@ def my_side_effect(*args, **kwargs): [access1, {'access_type': 'ip', 'access_to': '2.2.2.2', 'access_level': 'rw'}], - None, None) + None, None, None) @mock.patch(PATH_TO_RPC) def test_update_access__add_one_ip_to_empty_access_list(self, post): @@ -536,7 +537,7 @@ def my_side_effect(*args, **kwargs): raise exception.ManilaException('Unexpected request') post.return_value = FakeResponse() - self.drv.update_access(self.ctx, share, [access], None, None) + self.drv.update_access(self.ctx, share, [access], None, None, None) post.assert_called_with( self.request_params.url, data=self.request_params.build_post_args( @@ -552,7 +553,7 @@ def my_side_effect(*args, **kwargs): [{'access_type': 'ip', 'access_to': '1111', 'access_level': 'rw'}], - None, None) + None, None, None) @mock.patch(PATH_TO_RPC) def test_deny_access__unsupported_access_type(self, post): @@ -565,7 +566,7 @@ def test_deny_access__unsupported_access_type(self, post): } self.assertRaises(exception.InvalidShareAccess, self.drv.update_access, - self.ctx, share, [access], None, None) + self.ctx, share, [access], None, None, None) def test_share_backend_name(self): self.assertEqual('NexentaStor', self.drv.share_backend_name) diff --git a/manila/tests/share/drivers/nexenta/ns5/test_nexenta_nas.py b/manila/tests/share/drivers/nexenta/ns5/test_nexenta_nas.py index 74a55cf816..e692ac64ed 100644 --- 
a/manila/tests/share/drivers/nexenta/ns5/test_nexenta_nas.py +++ b/manila/tests/share/drivers/nexenta/ns5/test_nexenta_nas.py @@ -341,7 +341,7 @@ def test_update_access__ip_rw(self, update_nfs_access): self.assertEqual( {'fake_id': {'state': 'active'}}, self.drv.update_access( - self.ctx, SHARE, [access], None, None)) + self.ctx, SHARE, [access], None, None, None)) self.drv._update_nfs_access.assert_called_with(SHARE, ['1.1.1.1'], []) @mock.patch('%s._update_nfs_access' % DRV_PATH) @@ -356,7 +356,7 @@ def test_update_access__ip_ro(self, update_nfs_access): expected = {'fake_id': {'state': 'active'}} self.assertEqual( expected, self.drv.update_access( - self.ctx, SHARE, [access], None, None)) + self.ctx, SHARE, [access], None, None, None)) self.drv._update_nfs_access.assert_called_with(SHARE, [], ['1.1.1.1']) @ddt.data('rw', 'ro') @@ -369,7 +369,7 @@ def test_update_access__not_ip(self, access_level): } expected = {'fake_id': {'state': 'error'}} self.assertEqual(expected, self.drv.update_access( - self.ctx, SHARE, [access], None, None)) + self.ctx, SHARE, [access], None, None, None)) @mock.patch('%s._get_capacity_info' % DRV_PATH) @mock.patch('manila.share.driver.ShareDriver._update_share_stats') diff --git a/manila/tests/share/drivers/purestorage/test_flashblade.py b/manila/tests/share/drivers/purestorage/test_flashblade.py index 33777e62c4..9ead0bd3c4 100644 --- a/manila/tests/share/drivers/purestorage/test_flashblade.py +++ b/manila/tests/share/drivers/purestorage/test_flashblade.py @@ -361,7 +361,7 @@ def test_update_access_share(self): } rule_map = self.driver.update_access( - None, test_nfs_share, access_rules, [], [] + None, test_nfs_share, access_rules, [], [], [] ) self.assertEqual(expected_rule_map, rule_map) diff --git a/manila/tests/share/drivers/qnap/test_qnap.py b/manila/tests/share/drivers/qnap/test_qnap.py index 884d65cda6..529f6b2e35 100644 --- a/manila/tests/share/drivers/qnap/test_qnap.py +++ b/manila/tests/share/drivers/qnap/test_qnap.py @@ 
-858,7 +858,7 @@ def test_update_access_allow_access( private_storage=mock_private_storage) self.driver.update_access( 'context', self.share, 'access_rules', - None, None, share_server=None) + None, None, None, share_server=None) mock_api_executor.return_value.set_nfs_access.assert_called_once_with( 'fakeVolName', 2, 'all') @@ -882,9 +882,10 @@ def test_update_access_deny_and_allow_access( delete_rules.append('access1') add_rules = [] add_rules.append('access1') + update_rules = [] self.driver.update_access( 'context', self.share, None, - add_rules, delete_rules, share_server=None) + add_rules, delete_rules, update_rules, share_server=None) mock_deny_access.assert_called_once_with( 'context', self.share, 'access1', None) @@ -907,6 +908,7 @@ def test_update_access_without_volname(self): access_rules='access_rules', add_rules=None, delete_rules=None, + update_rules=None, share_server=None) @mock.patch.object(qnap.QnapShareDriver, '_get_location_path') diff --git a/manila/tests/share/drivers/quobyte/test_quobyte.py b/manila/tests/share/drivers/quobyte/test_quobyte.py index 469962a409..f8d71da353 100644 --- a/manila/tests/share/drivers/quobyte/test_quobyte.py +++ b/manila/tests/share/drivers/quobyte/test_quobyte.py @@ -564,7 +564,8 @@ def test_update_access_add_delete(self, qb_deny_mock, qb_allow_mock): self.share, access_rules=None, add_rules=[access_1], - delete_rules=[access_2, access_3]) + delete_rules=[access_2, access_3], + update_rules=[]) qb_allow_mock.assert_called_once_with(self._context, self.share, access_1) @@ -575,7 +576,8 @@ def test_update_access_add_delete(self, qb_deny_mock, qb_allow_mock): @mock.patch.object(quobyte.LOG, "warning") def test_update_access_no_rules(self, qb_log_mock): self._driver.update_access(context=None, share=None, access_rules=[], - add_rules=[], delete_rules=[]) + add_rules=[], delete_rules=[], + update_rules=[]) qb_log_mock.assert_has_calls([mock.ANY]) @@ -600,7 +602,7 @@ def test_update_access_recovery_additionals(self, 
self._driver.update_access(self._context, self.share, access_rules=add_access_rules, add_rules=[], - delete_rules=[]) + delete_rules=[], update_rules=[]) assert_calls = [mock.call(self._context, self.share, new_access_1), mock.call(self._context, self.share, new_access_2)] @@ -627,7 +629,7 @@ def test_update_access_recovery_superfluous(self, self._driver.update_access(self._context, self.share, access_rules=old_access_rules, add_rules=[], - delete_rules=[]) + delete_rules=[], update_rules=[]) qb_deny_mock.assert_called_once_with(self._context, self.share, @@ -665,7 +667,7 @@ def test_update_access_recovery_add_superfluous(self, self._driver.update_access(self._context, self.share, new_access_rules, add_rules=[], - delete_rules=[]) + delete_rules=[], update_rules=[]) a_calls = [mock.call(self._context, self.share, new_access_1), mock.call(self._context, self.share, new_access_2)] diff --git a/manila/tests/share/drivers/tegile/test_tegile.py b/manila/tests/share/drivers/tegile/test_tegile.py index 0979b70a68..8d59ce6dd3 100644 --- a/manila/tests/share/drivers/tegile/test_tegile.py +++ b/manila/tests/share/drivers/tegile/test_tegile.py @@ -730,7 +730,8 @@ def test_update_access(self, access_rules, add_rules, test_share, access_rules=access_rules, add_rules=add_rules, - delete_rules=delete_rules) + delete_rules=delete_rules, + update_rules=None) allow_params = ( '%s/%s/%s/%s' % ( diff --git a/manila/tests/share/drivers/test_ganesha.py b/manila/tests/share/drivers/test_ganesha.py index 63fd985e55..d3d6a3bae4 100644 --- a/manila/tests/share/drivers/test_ganesha.py +++ b/manila/tests/share/drivers/test_ganesha.py @@ -271,7 +271,7 @@ def test_update_access_for_allow(self): self._helper.update_access( self._context, self.share, access_rules=[self.access], - add_rules=[self.access], delete_rules=[]) + add_rules=[self.access], delete_rules=[], update_rules=[]) self._helper._allow_access.assert_called_once_with( '/', self.share, self.access) @@ -286,7 +286,7 @@ def 
test_update_access_for_deny(self): self._helper.update_access( self._context, self.share, access_rules=[], - add_rules=[], delete_rules=[self.access]) + add_rules=[], delete_rules=[self.access], update_rules=[]) self._helper._deny_access.assert_called_once_with( '/', self.share, self.access) @@ -301,7 +301,7 @@ def test_update_access_recovery(self): self._helper.update_access( self._context, self.share, access_rules=[self.access], - add_rules=[], delete_rules=[]) + add_rules=[], delete_rules=[], update_rules=[]) self._helper._allow_access.assert_called_once_with( '/', self.share, self.access) @@ -316,7 +316,8 @@ def test_update_access_invalid_share_access_type(self): result = self._helper.update_access(self._context, self.share, access_rules=[bad_rule], - add_rules=[], delete_rules=[]) + add_rules=[], delete_rules=[], + update_rules=[]) self.assertEqual(expected, result) @@ -326,7 +327,8 @@ def test_update_access_invalid_share_access_level(self): result = self._helper.update_access(self._context, self.share, access_rules=[bad_rule], - add_rules=[], delete_rules=[]) + add_rules=[], delete_rules=[], + update_rules=[]) self.assertEqual(expected, result) @@ -484,7 +486,7 @@ def test_update_access_add_export(self): self._helper.update_access( self._context, self.share, access_rules=[self.rule1], - add_rules=[], delete_rules=[]) + add_rules=[], delete_rules=[], update_rules=[]) mock_gh.check_export_exists.assert_called_once_with('fakename') mock_gh.get_export_id.assert_called_once_with() @@ -520,7 +522,7 @@ def test_update_access_update_export(self, client): self._helper.update_access( self._context, self.share, access_rules=[self.rule1, self.rule2], - add_rules=[self.rule2], delete_rules=[]) + add_rules=[self.rule2], delete_rules=[], update_rules=[]) mock_gh.check_export_exists.assert_called_once_with('fakename') mock_gh.update_export.assert_called_once_with('fakename', @@ -541,7 +543,7 @@ def test_update_access_remove_export(self): self._helper.update_access( 
self._context, self.share, access_rules=[], - add_rules=[], delete_rules=[self.rule1]) + add_rules=[], delete_rules=[self.rule1], update_rules=[]) mock_gh.check_export_exists.assert_called_once_with('fakename') mock_gh.remove_export.assert_called_once_with('fakename') @@ -559,7 +561,7 @@ def test_update_access_export_file_already_removed(self): self._helper.update_access( self._context, self.share, access_rules=[], - add_rules=[], delete_rules=[self.rule1]) + add_rules=[], delete_rules=[self.rule1], update_rules=[]) mock_gh.check_export_exists.assert_called_once_with('fakename') ganesha.LOG.warning.assert_called_once_with(mock.ANY, mock.ANY) @@ -577,7 +579,8 @@ def test_update_access_invalid_share_access_type(self): result = self._helper.update_access(self._context, self.share, access_rules=[bad_rule], - add_rules=[], delete_rules=[]) + add_rules=[], delete_rules=[], + update_rules=[]) self.assertEqual(expected, result) @@ -591,6 +594,7 @@ def test_update_access_invalid_share_access_level(self): result = self._helper.update_access(self._context, self.share, access_rules=[bad_rule], - add_rules=[], delete_rules=[]) + add_rules=[], delete_rules=[], + update_rules=[]) self.assertEqual(expected, result) diff --git a/manila/tests/share/drivers/test_generic.py b/manila/tests/share/drivers/test_generic.py index e85022d6d1..3bdf1ca449 100644 --- a/manila/tests/share/drivers/test_generic.py +++ b/manila/tests/share/drivers/test_generic.py @@ -305,6 +305,7 @@ def test_mount_device_not_present(self): server = {'instance_id': 'fake_server_id'} mount_path = self._driver._get_mount_path(self.share) volume = {'mountpoint': 'fake_mount_point'} + device_path = volume['mountpoint'] self.mock_object(self._driver, '_is_device_mounted', mock.Mock(return_value=False)) self.mock_object(self._driver, '_add_mount_permanently') @@ -316,7 +317,7 @@ def test_mount_device_not_present(self): self._driver._is_device_mounted.assert_called_once_with( mount_path, server, volume) 
self._driver._add_mount_permanently.assert_called_once_with( - self.share.id, server) + self.share.id, device_path, server) self._driver._ssh_exec.assert_called_once_with( server, ( 'sudo', 'mkdir', '-p', mount_path, @@ -490,17 +491,29 @@ def test_is_device_mounted_false_no_volume_provided(self): self.assertFalse(result) def test_add_mount_permanently(self): - self.mock_object(self._driver, '_ssh_exec') - self._driver._add_mount_permanently(self.share.id, self.server) + device_path = '/fake/mount/path' + device_uuid = 'fake_disk_uuid' + formated_device_uuid = f"UUID={device_uuid}" + self.mock_object(self._driver, '_ssh_exec', + mock.Mock(return_value=(device_uuid, ''))) + self._driver._add_mount_permanently(self.share.id, device_path, + self.server) self._driver._ssh_exec.assert_has_calls([ mock.call( self.server, ['grep', self.share.id, const.MOUNT_FILE_TEMP, '|', 'sudo', 'tee', '-a', const.MOUNT_FILE]), + mock.call(self.server, ['lsblk', '-o', 'uuid', + '-n', device_path]), + mock.call( + self.server, + ['sudo', 'sed', '-i', "s@{}@{}@".format(device_path, + formated_device_uuid), const.MOUNT_FILE]), mock.call(self.server, ['sudo', 'mount', '-a']) ]) def test_add_mount_permanently_raise_error_on_add(self): + device_path = 'fake_device_path' self.mock_object( self._driver, '_ssh_exec', mock.Mock(side_effect=exception.ProcessExecutionError)) @@ -508,6 +521,7 @@ def test_add_mount_permanently_raise_error_on_add(self): exception.ShareBackendException, self._driver._add_mount_permanently, self.share.id, + device_path, self.server ) self._driver._ssh_exec.assert_called_once_with( @@ -1166,6 +1180,7 @@ def test_update_access(self, access_level): self._driver.update_access(self._context, self.share, access_rules, add_rules=add_rules, delete_rules=delete_rules, + update_rules=None, share_server=self.server) # asserts diff --git a/manila/tests/share/drivers/test_helpers.py b/manila/tests/share/drivers/test_helpers.py index 072c31e69b..2f29c44906 100644 --- 
a/manila/tests/share/drivers/test_helpers.py +++ b/manila/tests/share/drivers/test_helpers.py @@ -256,11 +256,12 @@ def test_update_access_delete_invalid_rule( def test_get_host_list(self): fake_exportfs = ('/shares/share-1\n\t\t20.0.0.3\n' '/shares/share-1\n\t\t20.0.0.6\n' + '/shares/share-1\n\t\t\n' '/shares/share-2\n\t\t10.0.0.2\n' '/shares/share-2\n\t\t10.0.0.5\n' '/shares/share-3\n\t\t30.0.0.4\n' '/shares/share-3\n\t\t30.0.0.7\n') - expected = ['20.0.0.3', '20.0.0.6'] + expected = ['20.0.0.3', '20.0.0.6', '*'] result = self._helper.get_host_list(fake_exportfs, '/shares/share-1') self.assertEqual(expected, result) @@ -408,7 +409,7 @@ def fake_ssh_exec(*args, **kwargs): '"{}"'.format(':'.join(['1.1.1.16', local_path]))]), mock.call(self.server, ['sudo', 'exportfs', '-u', - '"{}"'.format(':'.join(['', local_path]))]), + '"{}"'.format(':'.join(['*', local_path]))]), ]) self._helper._sync_nfs_temp_and_perm_files.assert_called_once_with( diff --git a/manila/tests/share/drivers/test_lvm.py b/manila/tests/share/drivers/test_lvm.py index e834762c75..0331759654 100644 --- a/manila/tests/share/drivers/test_lvm.py +++ b/manila/tests/share/drivers/test_lvm.py @@ -483,6 +483,7 @@ def test_update_access(self, access_level): self._driver.update_access(self._context, self.share, access_rules, add_rules=add_rules, delete_rules=delete_rules, + update_rules=None, share_server=self.server) (self._driver._helpers[self.share['share_proto']]. 
update_access.assert_called_once_with( diff --git a/manila/tests/share/drivers/test_service_instance.py b/manila/tests/share/drivers/test_service_instance.py index 69b4616fc2..6ecc5d832d 100644 --- a/manila/tests/share/drivers/test_service_instance.py +++ b/manila/tests/share/drivers/test_service_instance.py @@ -1617,6 +1617,15 @@ def test_get_service_network_id_two_exist(self): @ddt.data(dict(), dict(subnet_id='foo'), dict(router_id='bar')) def test_teardown_network_no_service_data(self, server_details): + fake_ports = [ + {'device_id': 'fake_device_id', + 'device_owner': 'compute:foo'}, + ] + self.mock_object( + service_instance.neutron.API, 'update_subnet') + self.mock_object( + service_instance.neutron.API, 'list_ports', + mock.Mock(return_value=fake_ports)) instance = self._init_neutron_network_plugin() self.mock_object( service_instance.neutron.API, 'router_remove_interface') @@ -1757,6 +1766,31 @@ def test_teardown_network_subnet_not_used(self): service_instance.neutron.API.list_ports.assert_called_once_with( fields=['device_id', 'device_owner'], fixed_ips=['subnet_id=foo']) + def test_teardown_network_subnet_not_used_with_no_router_id(self): + server_details = dict(subnet_id='foo') + fake_ports = [ + {'device_id': 'fake_device_id', + 'device_owner': 'compute'}, + {'device_id': '', + 'device_owner': 'compute'}, + ] + instance = self._init_neutron_network_plugin() + self.mock_object( + service_instance.neutron.API, 'router_remove_interface') + self.mock_object( + service_instance.neutron.API, 'update_subnet') + self.mock_object( + service_instance.neutron.API, 'list_ports', + mock.Mock(return_value=fake_ports)) + + instance.teardown_network(server_details) + self.assertFalse( + service_instance.neutron.API.router_remove_interface.called) + (service_instance.neutron.API.update_subnet. 
+ assert_called_once_with('foo', '')) + service_instance.neutron.API.list_ports.assert_called_once_with( + fields=['device_id', 'device_owner'], fixed_ips=['subnet_id=foo']) + def test_teardown_network_subnet_not_used_and_get_error_404(self): server_details = dict(subnet_id='foo', router_id='bar') fake_ports = [ diff --git a/manila/tests/share/drivers/veritas/test_veritas_isa.py b/manila/tests/share/drivers/veritas/test_veritas_isa.py index 08b0183c84..e88921e36f 100644 --- a/manila/tests/share/drivers/veritas/test_veritas_isa.py +++ b/manila/tests/share/drivers/veritas/test_veritas_isa.py @@ -283,7 +283,7 @@ def test_delete_snapshot_if_not_present_at_backend(self): def test_update_access_for_allow(self): self.mock_object(self._driver, '_access_api') self._driver.update_access(self._context, self.share, [], - [self.access], []) + [self.access], [], []) self.assertEqual(2, self._driver._access_api.call_count) def test_update_access_for_allow_negative(self): @@ -292,22 +292,22 @@ def test_update_access_for_allow_negative(self): self.assertRaises(exception.ShareBackendException, self._driver.update_access, self._context, - self.share, [], [self.access], []) + self.share, [], [self.access], [], []) self.assertRaises(exception.InvalidShareAccess, self._driver.update_access, self._context, - self.share, [], [self.access2], []) + self.share, [], [self.access2], [], []) self.assertRaises(exception.InvalidShareAccessLevel, self._driver.update_access, self._context, - self.share, [], [self.access3], []) + self.share, [], [self.access3], [], []) def test_update_access_for_deny(self): self.mock_object(self._driver, '_access_api') self._driver.update_access(self._context, self.share, - [], [], [self.access]) + [], [], [self.access], []) self.assertEqual(2, self._driver._access_api.call_count) def test_update_access_for_deny_negative(self): @@ -316,19 +316,19 @@ def test_update_access_for_deny_negative(self): self.assertRaises(exception.ShareBackendException, 
self._driver.update_access, self._context, - self.share, [], [], [self.access]) + self.share, [], [], [self.access], []) def test_update_access_for_deny_for_invalid_access_type(self): self.mock_object(self._driver, '_access_api') self._driver.update_access(self._context, self.share, - [], [], [self.access2]) + [], [], [self.access2], []) self.assertEqual(0, self._driver._access_api.call_count) def test_update_access_for_empty_rule_list(self): self.mock_object(self._driver, '_allow_access') self.mock_object(self._driver, '_deny_access') self._driver.update_access(self._context, self.share, - [], [], []) + [], [], [], []) self.assertEqual(0, self._driver._allow_access.call_count) self.assertEqual(0, self._driver._deny_access.call_count) @@ -351,7 +351,7 @@ def test_update_access_for_access_rules(self): a_rule = self._driver._return_access_lists_difference([self.access4], existing_a_rules) self._driver.update_access(self._context, self.share, - [self.access4], [], []) + [self.access4], [], [], []) self.assertEqual(d_rule, existing_a_rules) self.assertEqual(a_rule, [self.access4]) diff --git a/manila/tests/share/drivers/windows/test_windows_smb_helper.py b/manila/tests/share/drivers/windows/test_windows_smb_helper.py index 87a005352b..4174e4a52f 100644 --- a/manila/tests/share/drivers/windows/test_windows_smb_helper.py +++ b/manila/tests/share/drivers/windows/test_windows_smb_helper.py @@ -214,7 +214,7 @@ def test_update_access_invalid_type(self): exception.InvalidShareAccess, self._win_smb_helper.update_access, mock.sentinel.server, mock.sentinel.share_name, - [invalid_access_rule], [], []) + [invalid_access_rule], [], [], []) def test_update_access_invalid_level(self): invalid_access_rule = dict(self._FAKE_RW_ACC_RULE, @@ -223,7 +223,7 @@ def test_update_access_invalid_level(self): exception.InvalidShareAccessLevel, self._win_smb_helper.update_access, mock.sentinel.server, mock.sentinel.share_name, - [], [invalid_access_rule], []) + [], [invalid_access_rule], [], []) 
@mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_revoke_share_access') @@ -235,7 +235,7 @@ def test_update_access_deleting_invalid_rule(self, mock_revoke): self._win_smb_helper.update_access( mock.sentinel.server, mock.sentinel.share_name, - [], [], delete_rules) + [], [], delete_rules, []) mock_revoke.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name, @@ -256,7 +256,7 @@ def test_update_access(self, mock_revoke, mock_grant, self._win_smb_helper.update_access( mock.sentinel.server, mock.sentinel.share_name, - [], added_rules, deleted_rules) + [], added_rules, deleted_rules, []) mock_revoke.assert_has_calls( [mock.call(mock.sentinel.server, mock.sentinel.share_name, @@ -291,7 +291,7 @@ def test_update_access_maintenance( self._win_smb_helper.update_access( mock.sentinel.server, mock.sentinel.share_name, - all_rules, [], []) + all_rules, [], [], []) mock_get_access_rules.assert_called_once_with( mock.sentinel.server, mock.sentinel.share_name) diff --git a/manila/tests/share/drivers/zfsonlinux/test_driver.py b/manila/tests/share/drivers/zfsonlinux/test_driver.py index 262360816b..d5f66d253f 100644 --- a/manila/tests/share/drivers/zfsonlinux/test_driver.py +++ b/manila/tests/share/drivers/zfsonlinux/test_driver.py @@ -1152,7 +1152,7 @@ def test_update_access(self): } result = self.driver.update_access( - 'fake_context', share, [1], [2], [3]) + 'fake_context', share, [1], [2], [3], []) self.driver._get_dataset_name.assert_called_once_with(share) mock_shell_executor.assert_called_once_with(share['host']) diff --git a/manila/tests/share/test_access.py b/manila/tests/share/test_access.py index 60aa8ca35c..7f8f69a0f6 100644 --- a/manila/tests/share/test_access.py +++ b/manila/tests/share/test_access.py @@ -247,7 +247,8 @@ def test_update_access_rules_an_update_is_in_progress(self, initial_state): expected_filters = { 'state': (constants.ACCESS_STATE_APPLYING, - constants.ACCESS_STATE_DENYING), + constants.ACCESS_STATE_DENYING, + 
constants.ACCESS_STATE_UPDATING), } self.assertIsNone(retval) mock_debug_log.assert_called_once() @@ -273,17 +274,21 @@ def test_update_access_rules_nothing_to_update(self): expected_rule_filter_1 = { 'state': (constants.ACCESS_STATE_APPLYING, - constants.ACCESS_STATE_DENYING), + constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_UPDATING), } expected_rule_filter_2 = { 'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY, - constants.ACCESS_STATE_QUEUED_TO_DENY), + constants.ACCESS_STATE_QUEUED_TO_DENY, + constants.ACCESS_STATE_QUEUED_TO_UPDATE), } expected_conditionally_change = { constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_QUEUED_TO_DENY: constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_QUEUED_TO_UPDATE: + constants.ACCESS_STATE_UPDATING, } self.assertIsNone(retval) mock_debug_log.assert_called_once() @@ -320,17 +325,21 @@ def test_update_access_rules_delete_all_rules(self, delete_all_rules): expected_rule_filter_1 = { 'state': (constants.ACCESS_STATE_APPLYING, - constants.ACCESS_STATE_DENYING), + constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_UPDATING), } expected_rule_filter_2 = { 'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY, - constants.ACCESS_STATE_QUEUED_TO_DENY), + constants.ACCESS_STATE_QUEUED_TO_DENY, + constants.ACCESS_STATE_QUEUED_TO_UPDATE), } expected_conditionally_change = { constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_QUEUED_TO_DENY: constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_QUEUED_TO_UPDATE: + constants.ACCESS_STATE_UPDATING, } expected_get_and_update_calls = [] if delete_all_rules: @@ -422,7 +431,8 @@ def test__update_access_rules_with_driver_updates( expected_filters_1 = { 'state': (constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_ACTIVE, - constants.ACCESS_STATE_DENYING), + constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_UPDATING), } expected_filters_2 = {'state': constants.STATUS_ERROR} 
expected_get_and_update_calls = [ @@ -478,6 +488,7 @@ def test__update_access_rules_with_driver_updates( expected_conditional_state_updates = { constants.ACCESS_STATE_APPLYING: access_state, constants.ACCESS_STATE_DENYING: access_state, + constants.ACCESS_STATE_UPDATING: access_state, constants.ACCESS_STATE_ACTIVE: access_state, } expected_access_rule_update_calls = [ @@ -497,6 +508,7 @@ def test__update_access_rules_with_driver_updates( self.assertFalse(one_access_rule_update_call.called) expected_conditionally_change = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ACTIVE, + constants.ACCESS_STATE_UPDATING: constants.ACCESS_STATE_ACTIVE, } expected_get_and_update_calls.append( mock.call(self.context, share_instance_id=share_instance_id, @@ -570,17 +582,21 @@ def _driver_side_effect(*args, **kwargs): } expected_filters_3 = { 'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY, - constants.ACCESS_STATE_QUEUED_TO_DENY), + constants.ACCESS_STATE_QUEUED_TO_DENY, + constants.ACCESS_STATE_QUEUED_TO_UPDATE), } expected_conditionally_change_3 = { constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_APPLYING, constants.ACCESS_STATE_QUEUED_TO_DENY: constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_QUEUED_TO_UPDATE: + constants.ACCESS_STATE_UPDATING, } expected_conditionally_change_4 = { constants.ACCESS_STATE_APPLYING: constants.ACCESS_STATE_ERROR, constants.ACCESS_STATE_DENYING: constants.ACCESS_STATE_ERROR, + constants.ACCESS_STATE_UPDATING: constants.ACCESS_STATE_ERROR, } expected_get_and_update_calls = [ mock.call(self.context, filters=expected_filters_1, @@ -701,13 +717,16 @@ def test__check_needs_refresh(self, expected_needs_refresh): expected_filter = { 'state': (constants.ACCESS_STATE_QUEUED_TO_APPLY, - constants.ACCESS_STATE_QUEUED_TO_DENY), + constants.ACCESS_STATE_QUEUED_TO_DENY, + constants.ACCESS_STATE_QUEUED_TO_UPDATE), } expected_conditionally_change = { constants.ACCESS_STATE_QUEUED_TO_APPLY: constants.ACCESS_STATE_APPLYING, 
constants.ACCESS_STATE_QUEUED_TO_DENY: constants.ACCESS_STATE_DENYING, + constants.ACCESS_STATE_QUEUED_TO_UPDATE: + constants.ACCESS_STATE_UPDATING, } self.assertEqual(expected_needs_refresh, needs_refresh) @@ -740,9 +759,12 @@ def test__update_rules_through_share_driver(self, proto, pass_add_rules, fail_add_rules = self._get_pass_rules_and_fail_rules() pass_delete_rules, fail_delete_rules = ( self._get_pass_rules_and_fail_rules()) + pass_update_rules, fail_update_rules = ( + self._get_pass_rules_and_fail_rules()) test_rules = pass_rules + fail_rules test_add_rules = pass_add_rules + fail_add_rules test_delete_rules = pass_delete_rules + fail_delete_rules + test_update_rules = pass_update_rules + fail_update_rules fake_expect_driver_update_rules = pass_rules update_access_call = self.mock_object( @@ -754,6 +776,7 @@ def test__update_rules_through_share_driver(self, proto, access_rules_to_be_on_share=test_rules, add_rules=test_add_rules, delete_rules=test_delete_rules, + update_rules=test_update_rules, rules_to_be_removed_from_db=test_rules, share_server=None)) @@ -761,12 +784,14 @@ def test__update_rules_through_share_driver(self, proto, update_access_call.assert_called_once_with( self.context, share_instance, pass_rules, add_rules=pass_add_rules, - delete_rules=pass_delete_rules, share_server=None) + delete_rules=pass_delete_rules, + update_rules=pass_update_rules, + share_server=None) else: update_access_call.assert_called_once_with( self.context, share_instance, test_rules, add_rules=test_add_rules, delete_rules=test_delete_rules, - share_server=None) + update_rules=test_update_rules, share_server=None) self.assertEqual(fake_expect_driver_update_rules, driver_update_rules) def _get_pass_rules_and_fail_rules(self): diff --git a/manila/tests/share/test_api.py b/manila/tests/share/test_api.py index 1df56ec1e8..5ee4bfbb14 100644 --- a/manila/tests/share/test_api.py +++ b/manila/tests/share/test_api.py @@ -649,6 +649,96 @@ def 
test_get_all_filter_by_invalid_metadata(self): def test_get_all_filter_by_invalid_extra_specs(self): self._get_all_filter_metadata_or_extra_specs_invalid(key='extra_specs') + @ddt.data(True, False) + def test_update_metadata_from_share_type_extra_specs(self, with_metadata): + share_type = fakes.fake_share_type( + extra_specs={ + 'driver_handles_share_servers': 'False', + 'fake_driver:dedupe': 'True', + 'fake_driver:encrypt': 'True', + 'fake_driver:snapshot_policy': 'daily', + 'provisioning:max_share_size': '10', + } + ) + + user_metadata = {} + if with_metadata: + user_metadata = { + 'snapshot_policy': 'monthly', + 'tag': 't1', + 'max_share_size': '5', + } + + CONF.set_default( + "driver_updatable_metadata", + ['dedupe', 'snapshot_policy', 'thin_provisioning'], + ) + + result = self.api.update_metadata_from_share_type_extra_specs( + self.context, + share_type, + user_metadata + ) + + if with_metadata: + self.assertEqual( + result, + {'dedupe': 'True', 'snapshot_policy': 'monthly', 'tag': 't1', + 'max_share_size': '5'}) + else: + self.assertEqual( + result, + {'dedupe': 'True', 'snapshot_policy': 'daily'}) + + def test_update_share_network_subnet_from_metadata(self): + CONF.set_default( + "driver_updatable_subnet_metadata", + ['dedupe', 'snapshot_policy', 'thin_provisioning'], + ) + metadata = { + 'test_key': 'True', + 'snapshot_policy': 'monthly', + } + backend_metadata = { + k: v for k, v in + metadata.items() if k in CONF.driver_updatable_subnet_metadata} + + self.mock_object( + db_api, 'share_server_get_all_by_host_and_or_share_subnet', + mock.Mock(return_value=['fake_share_server'])) + mock_call = self.mock_object( + self.api.share_rpcapi, + 'update_share_network_subnet_from_metadata') + + self.api.update_share_network_subnet_from_metadata( + self.context, 'fake_sn_id', 'fake_sn_subnet_id', metadata) + mock_call.assert_called_once_with( + self.context, 'fake_sn_id', 'fake_sn_subnet_id', + 'fake_share_server', backend_metadata) + + def 
test_update_share_from_metadata(self): + CONF.set_default( + "driver_updatable_metadata", + ['dedupe', 'snapshot_policy', 'thin_provisioning'], + ) + metadata = { + 'dedupe': 'True', + 'snapshot_policy': 'monthly', + 'max_share_size': '10' + } + backend_metadata = { + k: v for k, v in metadata.items() if k != 'max_share_size'} + + self.mock_object(self.api, 'get', mock.Mock(return_value='fake_share')) + mock_call = self.mock_object( + self.api.share_rpcapi, + 'update_share_from_metadata' + ) + + self.api.update_share_from_metadata(self.context, 'fake_id', metadata) + mock_call.assert_called_once_with( + self.context, 'fake_share', backend_metadata) + @ddt.data(True, False) def test_create_public_and_private_share(self, is_public): share, share_data = self._setup_create_mocks(is_public=is_public) @@ -5982,13 +6072,16 @@ def test_share_server_migration_start(self): share_type = db_api.share_type_get(self.context, share_type['id']) fake_shares = [db_utils.create_share( host='fake@backend#pool', status=constants.STATUS_AVAILABLE, - share_type_id=share_type['id']) for x in range(4)] + share_type_id=share_type['id'], + share_server_id=fake_share_server['id']) for x in range(4)] fake_snapshots = [ db_utils.create_snapshot(share_id=fake_shares[0]['id'])] instance_ids = [share['instance']['id'] for share in fake_shares] + snap_instances = [] snap_instance_ids = [] for fake_share in fake_shares: for snapshot in fake_snapshots: + snap_instances.append({'id': snapshot['instance']['id']}) snap_instance_ids.append(snapshot['instance']['id']) fake_types = [share_type] fake_share_network = db_utils.create_share_network() @@ -6007,9 +6100,6 @@ def test_share_server_migration_start(self): share_expected_update = { 'status': constants.STATUS_SERVER_MIGRATING } - snapshot_get_calls = [ - mock.call(self.context, share['id']) for share in fake_shares] - mock_initial_checks = self.mock_object( self.api, '_migration_initial_checks', mock.Mock(return_value=[fake_shares, fake_types, 
service, @@ -6018,8 +6108,8 @@ def test_share_server_migration_start(self): self.share_rpcapi, 'share_server_migration_start') mock_server_update = self.mock_object(db_api, 'share_server_update') mock_snapshots_get = self.mock_object( - db_api, 'share_snapshot_get_all_for_share', - mock.Mock(return_value=fake_snapshots)) + db_api, 'share_snapshot_instance_get_all_with_filters', + mock.Mock(return_value=snap_instances)) mock_update_instances = self.mock_object( db_api, 'share_and_snapshot_instances_status_update') @@ -6036,8 +6126,7 @@ def test_share_server_migration_start(self): ) mock_server_update.assert_called_once_with( self.context, fake_share_server['id'], server_expected_update) - mock_snapshots_get.assert_has_calls( - snapshot_get_calls) + mock_snapshots_get.assert_called() mock_update_instances.assert_called_once_with( self.context, share_expected_update, current_expected_status=constants.STATUS_AVAILABLE, diff --git a/manila/tests/share/test_driver.py b/manila/tests/share/test_driver.py index e2a3c73a1c..9a6a6c1ee0 100644 --- a/manila/tests/share/test_driver.py +++ b/manila/tests/share/test_driver.py @@ -698,7 +698,8 @@ def test_update_access(self): 'fake_share', 'fake_access_rules', 'fake_add_rules', - 'fake_delete_rules' + 'fake_delete_rules', + 'fake_update_rules' ) def test_create_replica(self): diff --git a/manila/tests/share/test_manager.py b/manila/tests/share/test_manager.py index d2e8a4c5ff..bf414ca79e 100644 --- a/manila/tests/share/test_manager.py +++ b/manila/tests/share/test_manager.py @@ -208,6 +208,11 @@ def test_ensure_driver_resources_driver_needs_to_reapply_rules( 'reapply_access_rules': driver_needs_to_reapply_rules, }, } + fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} + self.mock_object(self.share_manager.db, + 'service_get_by_args', + mock.Mock(return_value=fake_service)) + self.mock_object(self.share_manager.db, 'service_update') mock_backend_info_update = self.mock_object( self.share_manager.db, 
'backend_info_update') mock_share_get_all_by_host = self.mock_object( @@ -263,6 +268,23 @@ def test_ensure_driver_resources_driver_needs_to_reapply_rules( mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) + self.share_manager.db.service_get_by_args.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), + self.share_manager.host, + 'manila-share' + ) + self.share_manager.db.service_update.assert_has_calls([ + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': True} + ), + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': False} + ) + ]) if driver_needs_to_reapply_rules: # don't care if share_instance['access_rules_status'] is "syncing" mock_reset_rules_method.assert_has_calls([ @@ -301,6 +323,11 @@ def test_ensure_driver_resources_share_metadata_updates(self): 'metadata': metadata_updates, }, } + fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} + self.mock_object(self.share_manager.db, + 'service_get_by_args', + mock.Mock(return_value=fake_service)) + self.mock_object(self.share_manager.db, 'service_update') mock_backend_info_update = self.mock_object( self.share_manager.db, 'backend_info_update') mock_share_get_all_by_host = self.mock_object( @@ -359,6 +386,23 @@ def test_ensure_driver_resources_share_metadata_updates(self): ]) # none of the share instances in the fake data have syncing rules mock_reset_rules_method.assert_not_called() + self.share_manager.db.service_get_by_args.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), + self.share_manager.host, + 'manila-share' + ) + self.share_manager.db.service_update.assert_has_calls([ + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': True} + ), + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': False} + ) + ]) def test_init_host_with_no_shares(self): 
self.mock_object(self.share_manager.db, @@ -520,6 +564,7 @@ def raise_share_access_exists(*args, **kwargs): } instances[0]['access_rules_status'] = '' instances[2]['access_rules_status'] = '' + fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object(self.share_manager.db, 'backend_info_get', mock.Mock(return_value=old_backend_info)) @@ -530,6 +575,10 @@ def raise_share_access_exists(*args, **kwargs): mock_share_get_all_by_host = self.mock_object( self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) + self.mock_object(self.share_manager.db, + 'service_get_by_args', + mock.Mock(return_value=fake_service)) + self.mock_object(self.share_manager.db, 'service_update') self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[4]])) @@ -607,6 +656,23 @@ def raise_share_access_exists(*args, **kwargs): mock.call(mock.ANY, instances[2]['id'], share_server=share_server), ])) + self.share_manager.db.service_get_by_args.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), + self.share_manager.host, + 'manila-share' + ) + self.share_manager.db.service_update.assert_has_calls([ + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': True} + ), + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': False} + ) + ]) @ddt.data(("some_hash", {"db_version": "test_version"}), ("ddd86ec90923b686597501e2f2431f3af59238c0", @@ -623,6 +689,7 @@ def test_init_host_without_shares_and_rules( new_backend_info else None) mock_backend_info_update = self.mock_object( self.share_manager.db, 'backend_info_update') + fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object( self.share_manager.db, 'backend_info_get', mock.Mock(return_value=old_backend_info)) @@ -630,6 +697,10 @@ def test_init_host_without_shares_and_rules( 
mock.Mock(return_value=new_backend_info)) self.mock_object(self.share_manager, 'publish_service_capabilities', mock.Mock()) + self.mock_object(self.share_manager.db, + 'service_get_by_args', + mock.Mock(return_value=fake_service)) + self.mock_object(self.share_manager.db, 'service_update') mock_ensure_shares = self.mock_object( self.share_manager.driver, 'ensure_shares') mock_share_instance_get_all_by_host = self.mock_object( @@ -665,6 +736,7 @@ def raise_NotImplementedError(*args, **kwargs): instances = self._setup_init_mocks(setup_access_rules=False) share_server = fakes.fake_share_server_get() + fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object(self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) @@ -683,6 +755,10 @@ def raise_NotImplementedError(*args, **kwargs): self.mock_object(self.share_manager, '_get_share_server_dict', mock.Mock(return_value=share_server)) self.mock_object(self.share_manager, 'publish_service_capabilities') + self.mock_object(self.share_manager.db, + 'service_get_by_args', + mock.Mock(return_value=fake_service)) + self.mock_object(self.share_manager.db, 'service_update') self.mock_object(manager.LOG, 'error') self.mock_object(manager.LOG, 'info') @@ -730,6 +806,23 @@ def raise_NotImplementedError(*args, **kwargs): mock.ANY, {'id': instances[1]['id'], 'status': instances[1]['status']}, ) + self.share_manager.db.service_get_by_args.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), + self.share_manager.host, + 'manila-share' + ) + self.share_manager.db.service_update.assert_has_calls([ + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': True} + ), + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': False} + ) + ]) def _get_share_instance_dict(self, share_instance, **kwargs): # TODO(gouthamr): remove method when the db layer returns primitives @@ -775,11 +868,16 @@ def 
raise_exception(*args, **kwargs): raise exception.ManilaException(message="Fake raise") instances = self._setup_init_mocks(setup_access_rules=False) + fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} mock_ensure_share = self.mock_object( self.share_manager.driver, 'ensure_share') self.mock_object(self.share_manager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) + self.mock_object(self.share_manager.db, + 'service_get_by_args', + mock.Mock(return_value=fake_service)) + self.mock_object(self.share_manager.db, 'service_update') self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[3]])) @@ -808,6 +906,20 @@ def raise_exception(*args, **kwargs): mock.call(utils.IsAMatcher(context.RequestContext), instances[0]), mock.call(utils.IsAMatcher(context.RequestContext), instances[2]), ]) + self.share_manager.db.service_get_by_args.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), self.share_manager.host, + 'manila-share' + ) + self.share_manager.db.service_update.assert_has_calls([ + mock.call( + utils.IsAMatcher(context.RequestContext), fake_service['id'], + {'ensuring': True} + ), + mock.call( + utils.IsAMatcher(context.RequestContext), fake_service['id'], + {'ensuring': False} + ) + ]) self.share_manager.driver.ensure_shares.assert_called_once_with( utils.IsAMatcher(context.RequestContext), [dict_instances[0], dict_instances[2], dict_instances[3]]) @@ -854,11 +966,16 @@ def raise_exception(*args, **kwargs): instances[4]['id']: {'status': 'available'} } smanager = self.share_manager + fake_service = {'id': 'fake_service_id', 'binary': 'manila-share'} self.mock_object(smanager.db, 'share_instance_get_all_by_host', mock.Mock(return_value=instances)) self.mock_object(self.share_manager.db, 'share_instance_get', mock.Mock(side_effect=[instances[0], instances[2], instances[4]])) + self.mock_object(self.share_manager.db, + 'service_get_by_args', + 
mock.Mock(return_value=fake_service)) + self.mock_object(self.share_manager.db, 'service_update') self.mock_object(self.share_manager.driver, 'ensure_share', mock.Mock(return_value=None)) self.mock_object(self.share_manager.driver, 'ensure_shares', @@ -915,6 +1032,23 @@ def raise_exception(*args, **kwargs): manager.LOG.exception.assert_has_calls([ mock.call(mock.ANY, mock.ANY), ]) + self.share_manager.db.service_get_by_args.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), + self.share_manager.host, + 'manila-share' + ) + self.share_manager.db.service_update.assert_has_calls([ + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': True} + ), + mock.call( + utils.IsAMatcher(context.RequestContext), + fake_service['id'], + {'ensuring': False} + ) + ]) def test_create_share_instance_from_snapshot_with_server(self): """Test share can be created from snapshot if server exists.""" @@ -10774,7 +10908,7 @@ def test_update_share_server_network_allocations(self): server = {'id': server_id} mock_servers_get = self.mock_object( self.share_manager.db, - 'share_server_get_all_by_host_and_share_subnet', + 'share_server_get_all_by_host_and_or_share_subnet', mock.Mock(return_value=[server])) current_network_allocations = 'fake_current_net_allocations' mock_form_net_allocations = self.mock_object( @@ -10810,7 +10944,8 @@ def test_update_share_server_network_allocations(self): self.context, net_id, new_subnet['availability_zone_id'], fallback_to_default=False) mock_servers_get.assert_called_once_with( - self.context, self.share_manager.host, new_share_network_subnet_id) + self.context, host=self.share_manager.host, + share_subnet_id=new_share_network_subnet_id) mock_form_net_allocations.assert_called_once_with( self.context, server['id'], subnets) mock_instances_get.assert_called_once_with( @@ -10844,7 +10979,7 @@ def test_update_share_server_network_allocations_failed(self): server = {'id': server_id} mock_servers_get = 
self.mock_object( self.share_manager.db, - 'share_server_get_all_by_host_and_share_subnet', + 'share_server_get_all_by_host_and_or_share_subnet', mock.Mock(return_value=[server])) current_network_allocations = 'fake_current_net_allocations' mock_form_net_allocations = self.mock_object( @@ -10886,7 +11021,8 @@ def test_update_share_server_network_allocations_failed(self): self.context, net_id, new_subnet['availability_zone_id'], fallback_to_default=False) mock_servers_get.assert_called_once_with( - self.context, self.share_manager.host, new_share_network_subnet_id) + self.context, host=self.share_manager.host, + share_subnet_id=new_share_network_subnet_id) mock_form_net_allocations.assert_called_once_with( self.context, server['id'], subnets) mock_instances_get.assert_called_once_with( @@ -10905,6 +11041,43 @@ def test_update_share_server_network_allocations_failed(self): mock_check_update_finished.assert_called_once_with( self.context, share_network_id=net_id) + def test_update_share_network_subnet_from_metadata(self): + share_server = fakes.fake_share_server_get() + share_network = db_utils.create_share_network(id='fake_sn_id') + share_net_subnet = db_utils.create_share_network_subnet( + id='fake_sns_id', share_network_id=share_network['id'] + ) + + self.mock_object( + self.share_manager.db, 'share_server_get', + mock.Mock(return_value=share_server)) + self.mock_object( + self.share_manager.db, 'share_network_get', + mock.Mock(return_value=share_network)) + self.mock_object( + self.share_manager.db, 'share_network_subnet_get', + mock.Mock(return_value=share_net_subnet)) + + metadata = {'showmount': 'true'} + mock_update = self.mock_object( + self.share_manager.driver, + 'update_share_network_subnet_from_metadata') + + self.share_manager.update_share_network_subnet_from_metadata( + self.context, share_network['id'], + share_net_subnet['id'], share_server['id'], metadata) + + mock_update.assert_called_once_with( + self.context, share_network, share_net_subnet, + 
share_server, metadata) + self.share_manager.message_api.create.assert_called_once_with( + utils.IsAMatcher(context.RequestContext), + message_field.Action.UPDATE_METADATA, + share_network['project_id'], + resource_type=message_field.Resource.SHARE_NETWORK_SUBNET, + resource_id=share_net_subnet['id'], + detail=message_field.Detail.UPDATE_METADATA_SUCCESS) + @ddt.ddt class HookWrapperTestCase(test.TestCase): diff --git a/manila/tests/share/test_rpcapi.py b/manila/tests/share/test_rpcapi.py index e1f3013dad..c6105fe1ea 100644 --- a/manila/tests/share/test_rpcapi.py +++ b/manila/tests/share/test_rpcapi.py @@ -49,6 +49,7 @@ def setUp(self): host = 'fake_host' share_server = db_utils.create_share_server(host=host) share_network_subnet = { + 'id': 'fake share network subnet', 'availability_zone_id': 'fake_az_id', 'neutron_net_id': 'fake_neutron_net_id', 'neutron_subnet_id': 'fake_neutron_subnet_id', @@ -130,7 +131,8 @@ def _test_share_api(self, method, rpc_method, **kwargs): expected_msg['snapshot_instance_id'] = snapshot_instance['id'] share_server_id_methods = [ 'manage_share_server', 'unmanage_share_server', - 'share_server_migration_start', 'share_server_migration_check'] + 'share_server_migration_start', 'share_server_migration_check', + 'update_share_network_subnet_from_metadata'] src_dest_share_server_methods = [ 'share_server_migration_cancel', 'share_server_migration_get_progress', @@ -147,6 +149,8 @@ def _test_share_api(self, method, rpc_method, **kwargs): and method in src_dest_share_server_methods): share_server = expected_msg.pop('dest_share_server', None) expected_msg['dest_share_server_id'] = share_server['id'] + if method == 'ensure_driver_resources': + expected_msg['skip_backend_info_check'] = True if 'host' in kwargs: host = kwargs['host'] @@ -391,6 +395,23 @@ def test_revert_to_snapshot(self): host='fake_host', reservations={'fake': 'fake'}) + def test_update_share_from_metadata(self): + self._test_share_api('update_share_from_metadata', + 
rpc_method='cast', + version='1.28', + share=self.fake_share, + metadata={'fake': 'fake'}) + + def test_update_share_network_subnet_from_metadata(self): + self._test_share_api( + 'update_share_network_subnet_from_metadata', + rpc_method='cast', + version='1.30', + share_network_id='fake_net_id', + share_network_subnet_id=self.fake_share_network_subnet['id'], + share_server=self.fake_share_server, + metadata={'fake': 'fake'}) + def test_create_replicated_snapshot(self): self._test_share_api('create_replicated_snapshot', rpc_method='cast', @@ -520,3 +541,11 @@ def test_update_share_server_network_allocations(self): dest_host=self.fake_host, share_network_id='fake_net_id', new_share_network_subnet_id='new_share_network_subnet_id') + + def test_ensure_driver_resources(self): + self._test_share_api( + 'ensure_driver_resources', + rpc_method='cast', + version='1.29', + host=self.fake_host, + ) diff --git a/manila/tests/test_utils.py b/manila/tests/test_utils.py index 99f5b325c0..e7e6a73d62 100644 --- a/manila/tests/test_utils.py +++ b/manila/tests/test_utils.py @@ -778,3 +778,20 @@ def test_is_all_tenants_invalid(self): search_opts = {'all_tenants': 'wonk'} self.assertRaises(exception.InvalidInput, utils.is_all_tenants, search_opts) + + @ddt.data( + ("8minutes", "PT8M"), + ("10hours", "PT10H"), + ("6months", "P6M"), + ("2years", "P2Y") + ) + @ddt.unpack + def test_convert_time_duration_to_iso_format(self, + time_duration, expected): + result = utils.convert_time_duration_to_iso_format(time_duration) + self.assertEqual(expected, result) + + def test_convert_time_duration_to_iso_format_negative(self): + self.assertRaises(exception.ManilaException, + utils.convert_time_duration_to_iso_format, + 'invalid_duration') diff --git a/manila/utils.py b/manila/utils.py index 94241af8a6..9c150b2628 100644 --- a/manila/utils.py +++ b/manila/utils.py @@ -617,3 +617,25 @@ def write_remote_file(ssh, filename, contents, as_root=False): stdin.close() stdin.channel.shutdown_write() 
ssh.exec_command(cmd2) + + +def convert_time_duration_to_iso_format(time_duration): + """Convert time duration to ISO 8601 format""" + unit_mapping = { + 'minutes': 'M', + 'hours': 'H', + 'days': 'D', + 'months': 'M', + 'years': 'Y', + } + pattern = re.compile(r'(\d+)\s*(minutes|hours|days|months|years)') + match = pattern.match(time_duration) + if not match: + raise exception.ManilaException( + f"Invalid time duration format: {time_duration}") + value, unit = match.groups() + if unit in ["minutes", "hours", "days"]: + iso_format = f"PT{value}{unit_mapping[unit]}" + else: + iso_format = f"P{value}{unit_mapping[unit]}" + return iso_format diff --git a/releasenotes/notes/add-ensure-shares-api-9ac10877a99ab0c5.yaml b/releasenotes/notes/add-ensure-shares-api-9ac10877a99ab0c5.yaml new file mode 100644 index 0000000000..0ff0bb93e0 --- /dev/null +++ b/releasenotes/notes/add-ensure-shares-api-9ac10877a99ab0c5.yaml @@ -0,0 +1,16 @@ +--- +features: + - | + A new API to start the ensure shares procedure for Manila has been added. + Through this API, OpenStack administrators will be able to recalculate the + shares' export location without restarting the shares manager service. + Additionally, a new configuration option named + `update_shares_status_on_ensure` is now available to help OpenStack + administrators determine whether the shares' status should be modified + during the ensure shares procedure or not. +upgrade: + - | + When restarting the service on an upgrade, when ensure shares is being run + it will automatically transition the shares status to `ensuring`. In case + you would like to prevent it, please change the value of the + `update_shares_status_on_ensure` configuration option. 
diff --git a/releasenotes/notes/add_export_location_metadata-d3c279b73f4c4728.yaml b/releasenotes/notes/add_export_location_metadata-d3c279b73f4c4728.yaml new file mode 100644 index 0000000000..0921799fa8 --- /dev/null +++ b/releasenotes/notes/add_export_location_metadata-d3c279b73f4c4728.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Added share export location metadata capabilities including + create, update all, update single, show and delete metadata. + Allows configuration of `admin_only_el_metadata`, + such that keys in this list are able to be manipulated only by + those with admin privileges. By default, this includes + "preferred" key. diff --git a/releasenotes/notes/bp-pass-resource-metadata-updates-to-backend-drivers-7fff302f64fda2d7.yaml b/releasenotes/notes/bp-pass-resource-metadata-updates-to-backend-drivers-7fff302f64fda2d7.yaml new file mode 100644 index 0000000000..6c591e0bb5 --- /dev/null +++ b/releasenotes/notes/bp-pass-resource-metadata-updates-to-backend-drivers-7fff302f64fda2d7.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + OpenStack operators can now make use of a new config option named + `driver_updatable_metadata` to determine which share metadata updates the + back end driver needs to be notified about. The config option contains + list of share metadata keys. When the share's metadata gets updated and + Manila identifies that the new metadata keys match the metadata keys from + the provided list, the share back end will be notified and it will apply + the necessary changes. The result will be communicated through user + messages. 
diff --git a/releasenotes/notes/bp-pass-share-network-subnet-metadata-updates-to-backend-drivers-10441eee8375f146.yaml b/releasenotes/notes/bp-pass-share-network-subnet-metadata-updates-to-backend-drivers-10441eee8375f146.yaml new file mode 100644 index 0000000000..1e15609141 --- /dev/null +++ b/releasenotes/notes/bp-pass-share-network-subnet-metadata-updates-to-backend-drivers-10441eee8375f146.yaml @@ -0,0 +1,15 @@ +--- +features: + - | + OpenStack operators can now make use of a new config option named + `driver_updatable_subnet_metadata` to determine which share network + subnet metadata updates the back end driver needs to be notified about. + The config option contains list of share network subnet metadata keys. + When the share network subnet's metadata gets updated and Manila + identifies that the new metadata keys match the metadata keys from the + provided list, the share back end will be notified and it will apply + the necessary changes. The result will be communicated through user + messages. This feature is supported from microversion '2.89'. Since, + with share network migration, metadata belonging to the old share network + subnet is ignored when moving to a new share network, updates will not + be passed to new share servers. diff --git a/releasenotes/notes/bug-2020745-dell-unity-lacp-8653da49ad901c5c.yaml b/releasenotes/notes/bug-2020745-dell-unity-lacp-8653da49ad901c5c.yaml new file mode 100644 index 0000000000..b3502a679b --- /dev/null +++ b/releasenotes/notes/bug-2020745-dell-unity-lacp-8653da49ad901c5c.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Dell Unity Driver `Bug #2020745 + `_: + Fixed driver startup issue with link aggregation configured. 
diff --git a/releasenotes/notes/bug-2066871-allow-to-update-access-level-for-access-rule-741f8fc3cc190701.yaml b/releasenotes/notes/bug-2066871-allow-to-update-access-level-for-access-rule-741f8fc3cc190701.yaml new file mode 100644 index 0000000000..c4d2c82aaa --- /dev/null +++ b/releasenotes/notes/bug-2066871-allow-to-update-access-level-for-access-rule-741f8fc3cc190701.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Since microversion 2.88, Manila allows updating the access_level of access + rule using `openstack share access update` API. Currently this is supported + only for NetApp ONTAP backend. For more details, please check + `Launchpad bug #2066871 <https://launchpad.net/bugs/2066871>`_ diff --git a/releasenotes/notes/bug-2089061-fix-access-rules-locks-lookup-b5efbd41397acba3.yaml b/releasenotes/notes/bug-2089061-fix-access-rules-locks-lookup-b5efbd41397acba3.yaml new file mode 100644 index 0000000000..8cacc2fab2 --- /dev/null +++ b/releasenotes/notes/bug-2089061-fix-access-rules-locks-lookup-b5efbd41397acba3.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + While displaying and deleting access rules, manila was limiting the search + for locks to the context of the request. Now, manila will search within + all of the projects for locks and properly apply visibility and deletion + restrictions. For more details, please refer to + `launchpad bug #2089061 <https://launchpad.net/bugs/2089061>`_. diff --git a/releasenotes/notes/bug-2099273-stop-overriding-cephfs-nfs-protocols-cf7e3949f688ad6f.yaml b/releasenotes/notes/bug-2099273-stop-overriding-cephfs-nfs-protocols-cf7e3949f688ad6f.yaml new file mode 100644 index 0000000000..49e3a1ec3d --- /dev/null +++ b/releasenotes/notes/bug-2099273-stop-overriding-cephfs-nfs-protocols-cf7e3949f688ad6f.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + `Launchpad bug 2099273 <https://launchpad.net/bugs/2099273>`_ + has been addressed by removing the NFS protocol override within the + request sent to the Ceph Manager API. This allows users to mount shares + exported by the Ceph NFS service with any NFS protocol version exposed by + that service. 
diff --git a/releasenotes/notes/bug-2104357-Fix-server_migrating-status-of-non-active-replica-6af28a67a4684d16.yaml b/releasenotes/notes/bug-2104357-Fix-server_migrating-status-of-non-active-replica-6af28a67a4684d16.yaml new file mode 100644 index 0000000000..481945a94d --- /dev/null +++ b/releasenotes/notes/bug-2104357-Fix-server_migrating-status-of-non-active-replica-6af28a67a4684d16.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Manila now correctly handles the 'server_migrating' status of share and + snapshot instances during share server migration, especially when the + share server belongs to a non-active replica. For more details, please check + `Launchpad bug #2104357 <https://launchpad.net/bugs/2104357>`_ diff --git a/releasenotes/notes/fix-generic-driver-resize-0fde9c8674db5951.yaml b/releasenotes/notes/fix-generic-driver-resize-0fde9c8674db5951.yaml new file mode 100644 index 0000000000..bc5b078c7d --- /dev/null +++ b/releasenotes/notes/fix-generic-driver-resize-0fde9c8674db5951.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + For the generic driver, resizing a share could fail because exportfs + cannot recognize "" as part of the share path. This issue has been + fixed by replacing "" with "*". diff --git a/releasenotes/notes/fix-generic-driver-using-uuid-to-mount-volumes-291208b283120224.yaml b/releasenotes/notes/fix-generic-driver-using-uuid-to-mount-volumes-291208b283120224.yaml new file mode 100644 index 0000000000..c2233e0f1f --- /dev/null +++ b/releasenotes/notes/fix-generic-driver-using-uuid-to-mount-volumes-291208b283120224.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Generic driver - volumes mounted to the service instance are now mounted + via its uuid (/dev/disk/by-uuid/...) instead of canonical paths (/dev/vdb) + to prevent messing up the shares after restarts. 
diff --git a/releasenotes/notes/fix-no-router-server-0d5bf587063f22fc.yaml b/releasenotes/notes/fix-no-router-server-0d5bf587063f22fc.yaml new file mode 100644 index 0000000000..25fd3f6b74 --- /dev/null +++ b/releasenotes/notes/fix-no-router-server-0d5bf587063f22fc.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fix subnet cleanup for server instances without routers. + Previously, when tearing down a server instance that had no router + specified in its details, the associated subnet was not cleaned up because + the subnet cleanup code was never executed. diff --git a/releasenotes/notes/manila-netapp-storage-efficiency-policy-5fa0b2b15901bf93.yaml b/releasenotes/notes/manila-netapp-storage-efficiency-policy-5fa0b2b15901bf93.yaml new file mode 100644 index 0000000000..e32a2f8389 --- /dev/null +++ b/releasenotes/notes/manila-netapp-storage-efficiency-policy-5fa0b2b15901bf93.yaml @@ -0,0 +1,10 @@ +features: + - | + It is now possible to specify pre-created NetApp efficiency + policies through the use of the `netapp:efficiency_policy` share type + extra spec. + In the case of DHSS=True, the share server is not available upfront for + efficiency policy creation. Users can retype to apply the policy, or if the + share-network is constant (i.e., one share service is created for one share + network), they can create an efficiency policy for the share server and + apply it to DHSS=True shares. \ No newline at end of file diff --git a/releasenotes/notes/netapp-add-update-from-network-subnet-metadata-method-0615490d86958c3d.yaml b/releasenotes/notes/netapp-add-update-from-network-subnet-metadata-method-0615490d86958c3d.yaml new file mode 100644 index 0000000000..ee2eab6f4c --- /dev/null +++ b/releasenotes/notes/netapp-add-update-from-network-subnet-metadata-method-0615490d86958c3d.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + The NetApp ONTAP driver is now able to update the `showmount` and + `pnfs` configurations in a pre-created share server. 
Please use the + share network subnet metadata set feature to update these values. The + valid values for those metadata keys are 'true/false'. + Also note, earlier support of updating `showmount` using share metadata + is removed since it is now supported by share network subnet metadata. diff --git a/releasenotes/notes/netapp-add-update-share-from-metadata-method-71f308c2b05d59bb.yaml b/releasenotes/notes/netapp-add-update-share-from-metadata-method-71f308c2b05d59bb.yaml new file mode 100644 index 0000000000..d2b4bc6f97 --- /dev/null +++ b/releasenotes/notes/netapp-add-update-share-from-metadata-method-71f308c2b05d59bb.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + The NetApp ONTAP driver is now able to update the current `snapshot_policy` + and/or `showmount` configurations in a pre-created share. Please use + the share metadata set feature to update these values. diff --git a/releasenotes/notes/netapp-restrict-lif-creation-per-ha-pair-249021556be5189d.yaml b/releasenotes/notes/netapp-restrict-lif-creation-per-ha-pair-249021556be5189d.yaml new file mode 100644 index 0000000000..2aae18d796 --- /dev/null +++ b/releasenotes/notes/netapp-restrict-lif-creation-per-ha-pair-249021556be5189d.yaml @@ -0,0 +1,11 @@ +--- +fixes: + - | + NetApp driver `bug #2100835 + <https://launchpad.net/bugs/2100835>`_: + The NetApp driver can now prevent the creation of a share + server if the total number of data LIFs on one node of an HA pair, + including those that can be migrated in case of failure, exceeds the + maximum number of data LIFs supported by the node. This option guarantees + that, in the event of a node failure, the partner node will be able to + takeover all data LIFs. 
diff --git a/releasenotes/notes/snaplock-support-for-netapp-driver-9b639386c07c4990.yaml b/releasenotes/notes/snaplock-support-for-netapp-driver-9b639386c07c4990.yaml new file mode 100644 index 0000000000..a209ff0759 --- /dev/null +++ b/releasenotes/notes/snaplock-support-for-netapp-driver-9b639386c07c4990.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + NetApp Driver: The NetApp Driver now supports the creation of WORM + shares using the SnapLock feature. To create these shares, set the + 'netapp:snaplock_type' in the share type extra specs, along with the + SnapLock related retention period extra specs. + diff --git a/setup.cfg b/setup.cfg index ab29d1b6a7..410bf85870 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,7 +32,6 @@ packages = [entry_points] console_scripts = - manila-all = manila.cmd.all:main manila-api = manila.cmd.api:main manila-data = manila.cmd.data:main manila-manage = manila.cmd.manage:main diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 864e71300c..8f79d0cbb4 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -19,7 +19,7 @@ - manila-tox-py311-with-sqlalchemy-20: voting: false - manila-tempest-plugin-dummy-no-dhss - - manila-tempest-plugin-dummy-dhss + - manila-tempest-plugin-dummy-dhss-legacy - manila-tempest-plugin-container: voting: false - manila-tempest-plugin-lvm-fips: @@ -33,7 +33,7 @@ gate: jobs: - manila-tempest-plugin-dummy-no-dhss - - manila-tempest-plugin-dummy-dhss + - manila-tempest-plugin-dummy-dhss-legacy # Non-voting job while SQLAlchemy 2.0 is still blocked by upper-constraints - job: @@ -96,3 +96,14 @@ GLOBAL_VENV: false required-projects: - openstack/rally-openstack + +- job: + name: manila-tempest-plugin-dummy-dhss-legacy + parent: manila-tempest-plugin-dummy-dhss + vars: + devstack_local_conf: + test-config: + "$TEMPEST_CONFIG": + share: + run_negative_migration_replica_tests: true + run_positive_migration_replica_tests: false