@@ -1,3 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.cloudstack.storage.feign;

import feign.RequestInterceptor;
@@ -18,6 +18,7 @@
*/
package org.apache.cloudstack.storage.feign.client;

import feign.QueryMap;
import org.apache.cloudstack.storage.feign.model.Igroup;
import org.apache.cloudstack.storage.feign.model.Lun;
import org.apache.cloudstack.storage.feign.model.LunMap;
@@ -26,20 +27,19 @@
import feign.Param;
import feign.RequestLine;
import java.net.URI;
import java.util.Map;

//TODO: Proper URLs should be added in the RequestLine annotations below
public interface SANFeignClient {

// LUN Operation APIs
@RequestLine("POST /")
@Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"})
OntapResponse<Lun> createLun(@Param("authHeader") String authHeader,
@Param("returnRecords") boolean returnRecords,
Lun lun);
@RequestLine("POST /api/storage/luns?return_records={returnRecords}")
@Headers({"Authorization: {authHeader}"})
OntapResponse<Lun> createLun(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Lun lun);

@RequestLine("GET /")
@RequestLine("GET /api/storage/luns")
@Headers({"Authorization: {authHeader}"})
OntapResponse<Lun> getLunResponse(@Param("authHeader") String authHeader);
OntapResponse<Lun> getLunResponse(@Param("authHeader") String authHeader, @QueryMap Map<String, Object> queryMap);

@RequestLine("GET /{uuid}")
@Headers({"Authorization: {authHeader}"})
@@ -54,15 +54,13 @@ OntapResponse<Lun> createLun(@Param("authHeader") String authHeader,
void deleteLun(@Param("authHeader") String authHeader, @Param("uuid") String uuid);

// iGroup Operation APIs
@RequestLine("POST /")
@Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"})
OntapResponse<Igroup> createIgroup(@Param("authHeader") String authHeader,
@Param("returnRecords") boolean returnRecords,
Igroup igroupRequest);
@RequestLine("POST /api/protocols/san/igroups?return_records={returnRecords}")
@Headers({"Authorization: {authHeader}"})
OntapResponse<Igroup> createIgroup(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Igroup igroupRequest);

@RequestLine("GET /")
@Headers({"Authorization: {authHeader}"}) // TODO: Check this again, uuid should be part of the path?
OntapResponse<Igroup> getIgroupResponse(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
@RequestLine("GET /api/protocols/san/igroups")
@Headers({"Authorization: {authHeader}"})
OntapResponse<Igroup> getIgroupResponse(@Param("authHeader") String authHeader, @QueryMap Map<String, Object> queryMap);

@RequestLine("GET /{uuid}")
@Headers({"Authorization: {authHeader}"})
@@ -73,17 +71,18 @@ OntapResponse<Igroup> createIgroup(@Param("authHeader") String authHeader,
void deleteIgroup(@Param("baseUri") URI baseUri, @Param("authHeader") String authHeader, @Param("uuid") String uuid);

// LUN Maps Operation APIs
@RequestLine("POST /")
@Headers({"Authorization: {authHeader}"})
OntapResponse<LunMap> createLunMap(@Param("authHeader") String authHeader, LunMap lunMap);
@RequestLine("POST /api/protocols/san/lun-maps")
@Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"})
OntapResponse<LunMap> createLunMap(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, LunMap lunMap);


@RequestLine("GET /")
@Headers({"Authorization: {authHeader}"})
OntapResponse<LunMap> getLunMapResponse(@Param("authHeader") String authHeader);

@RequestLine("DELETE /{lunUuid}/{igroupUuid}")
@RequestLine("DELETE /api/protocols/san/lun-maps/{lunUuid}/{igroupUuid}")
@Headers({"Authorization: {authHeader}"})
void deleteLunMap(@Param("authHeader") String authHeader,
@Param("lunUuid") String lunUuid,
@Param("igroupUuid") String igroupUuid);
@Param("lunUuid") String lunUUID,
@Param("igroupUuid") String igroupUUID);
}
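For context, a minimal usage sketch of the reworked client (not part of this PR): only the SANFeignClient interface and the getLunResponse signature above come from the diff; the builder configuration with feign-jackson, the base URL, the credential placeholder and the ONTAP filter keys are illustrative assumptions.

import feign.Feign;
import feign.jackson.JacksonDecoder;
import feign.jackson.JacksonEncoder;
import org.apache.cloudstack.storage.feign.client.SANFeignClient;

import java.util.HashMap;
import java.util.Map;

public class SanFeignClientUsageSketch {
    public static void main(String[] args) {
        // Hypothetical ONTAP management endpoint; the plugin's own interceptor/SSL
        // configuration is not shown here.
        SANFeignClient client = Feign.builder()
                .encoder(new JacksonEncoder())
                .decoder(new JacksonDecoder())
                .target(SANFeignClient.class, "https://ontap.example.com");

        // Placeholder credential string; in the plugin the Authorization value is supplied by the caller.
        String authHeader = "Basic <base64-credentials>";

        // @QueryMap expands this map to ?svm.name=svm1&name=lun0 on GET /api/storage/luns.
        // The filter keys are assumed ONTAP query parameters, not taken from this PR.
        Map<String, Object> query = new HashMap<>();
        query.put("svm.name", "svm1");
        query.put("name", "lun0");

        var luns = client.getLunResponse(authHeader, query);
        System.out.println(luns);
    }
}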
@@ -42,4 +42,3 @@ public interface VolumeFeignClient {
@Headers({"Accept: {acceptHeader}", "Authorization: {authHeader}"})
JobResponse updateVolumeRebalancing(@Param("acceptHeader") String acceptHeader, @Param("uuid") String uuid, Volume volumeRequest);
}

@@ -143,5 +143,4 @@ public int hashCode() {

@JsonInclude(JsonInclude.Include.NON_NULL)
public static class Links { }

}
}
@@ -23,6 +23,7 @@
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.resource.ResourceManager;
@@ -38,17 +39,26 @@
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
import org.apache.cloudstack.storage.feign.model.Igroup;
import org.apache.cloudstack.storage.feign.model.Initiator;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.feign.model.Svm;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
import org.apache.cloudstack.storage.utils.Utility;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import javax.inject.Inject;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -59,6 +69,8 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
@Inject private StorageManager _storageMgr;
@Inject private ResourceManager _resourceMgr;
@Inject private PrimaryDataStoreHelper _dataStoreHelper;
@Inject private PrimaryDataStoreDao storagePoolDao;
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);

// ONTAP minimum volume size is 1.56 GB (1677721600 bytes)
@@ -241,16 +253,48 @@ public DataStore initialize(Map<String, Object> dsInfos) {
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
logger.debug("In attachCluster for ONTAP primary storage");
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);
if (dataStore == null) {
throw new InvalidParameterValueException("attachCluster: dataStore should not be null");
}
if (scope == null) {
throw new InvalidParameterValueException("attachCluster: scope should not be null");
}
List<String> hostsIdentifier = new ArrayList<>();
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
if (storagePool == null) {
s_logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId());
throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId());
}
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore);
// TODO- need to check if no host to connect then throw exception or just continue
logger.debug("attachCluster: Eligible Up and Enabled hosts: {} in cluster {}", hostsToConnect, primaryStore.getClusterId());

logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId());
StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
//TODO- Check if we have to handle heterogeneous host within the cluster
if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) {
String errMsg = "attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name();
s_logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
//TODO - check if no host to connect then also need to create access group without initiators
if (hostsIdentifier != null && hostsIdentifier.size() > 0) {
try {
AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier);
strategy.createAccessGroup(accessGroupRequest);
} catch (Exception e) {
s_logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage(), e);
throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage(), e);
}
}
logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId());
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
logger.warn("attachCluster: Unable to establish a connection between " + host + " and " + dataStore, e);
}
}
_dataStoreHelper.attachCluster(dataStore);
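For readers following attachCluster above, a small sketch of the storage-pool detail map it consumes. The literal key strings are assumptions; the code resolves them through Constants.PROTOCOL and Constants.SVM_NAME, whose values are not part of this diff.

import java.util.HashMap;
import java.util.Map;

public class PoolDetailsSketch {
    public static void main(String[] args) {
        Map<String, String> details = new HashMap<>();
        details.put("protocol", "ISCSI"); // parsed with ProtocolType.valueOf(...) above; key name assumed
        details.put("svmName", "svm1");   // used as the SVM name when building the igroup request; key name assumed
        System.out.println(details);
    }
}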
@@ -265,11 +309,41 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
logger.debug("In attachZone for ONTAP primary storage");
if (dataStore == null) {
throw new InvalidParameterValueException("attachZone: dataStore should not be null");
}
if (scope == null) {
throw new InvalidParameterValueException("attachZone: scope should not be null");
}
List<String> hostsIdentifier = new ArrayList<>();
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
if (storagePool == null) {
s_logger.error("attachZone : Storage Pool not found for id: " + dataStore.getId());
throw new CloudRuntimeException("attachZone : Storage Pool not found for id: " + dataStore.getId());
}
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);
// TODO- need to check if no host to connect then throw exception or just continue
logger.debug("attachZone: Eligible Up and Enabled hosts: {}", hostsToConnect);

logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
//TODO- Check if we have to handle heterogeneous host within the zone
if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) {
String errMsg = "attachZone: Not all hosts in the zone support the protocol: " + protocol.name();
s_logger.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) {
try {
AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier);
strategy.createAccessGroup(accessGroupRequest);
} catch (Exception e) {
s_logger.error("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage());
throw new CloudRuntimeException("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage());
}
}
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
@@ -280,6 +354,73 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper
return true;
}

private boolean validateProtocolSupportAndFetchHostsIdentifier(List<HostVO> hosts, ProtocolType protocolType, List<String> hostIdentifiers) {
switch (protocolType) {
case ISCSI:
String protocolPrefix = Constants.IQN;
for (HostVO host : hosts) {
if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty()
|| !host.getStorageUrl().startsWith(protocolPrefix)) {
return false;
}
hostIdentifiers.add(host.getStorageUrl());
}
break;
default:
throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name());
}
logger.info("validateProtocolSupportAndFetchHostsIdentifier: All hosts support the protocol: " + protocolType.name());
return true;
}
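A standalone sketch of the iSCSI identifier check performed above; the "iqn" literal stands in for Constants.IQN (its exact value is not shown in this diff), and the sample identifiers are illustrative.

import java.util.Arrays;
import java.util.List;

public class IqnCheckSketch {
    public static void main(String[] args) {
        String protocolPrefix = "iqn"; // assumption: stands in for Constants.IQN
        List<String> storageUrls = Arrays.asList(
                "iqn.1993-08.org.debian:01:4e5f6a7b8c9d", // typical KVM initiator IQN -> accepted
                "eui.02004567A425678D",                   // non-IQN identifier -> rejected by the ISCSI branch
                "",                                        // blank storage URL -> rejected
                null);                                     // missing storage URL -> rejected
        for (String url : storageUrls) {
            boolean supported = url != null && !url.trim().isEmpty() && url.startsWith(protocolPrefix);
            System.out.println(url + " -> " + supported);
        }
    }
}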

private AccessGroup createAccessGroupRequestByProtocol(StoragePoolVO storagePool, long scopeId, Map<String, String> details, List<String> hostsIdentifier) {
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL).toUpperCase());
String svmName = details.get(Constants.SVM_NAME);
switch (protocol) {
case ISCSI:
// Access group name format: cs_svmName_scopeId
String igroupName = Utility.getIgroupName(svmName, scopeId);
Review comment (Collaborator): This getIgroupName could rather be in some helper class instead of Utility

Hypervisor.HypervisorType hypervisorType = storagePool.getHypervisor();
return createSANAccessGroupRequest(svmName, igroupName, hypervisorType, hostsIdentifier);
default:
s_logger.error("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol);
throw new CloudRuntimeException("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol);
}
}
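The "cs_svmName_scopeId" comment above documents the igroup naming convention. A hypothetical equivalent of Utility.getIgroupName (whose real implementation is not part of this diff) would look roughly like the sketch below, which also shows why the reviewer suggests it could live in a small helper class of its own.

public class IgroupNameSketch {
    // Assumed shape of Utility.getIgroupName, inferred from the "cs_svmName_scopeId" comment above.
    static String getIgroupName(String svmName, long scopeId) {
        return "cs_" + svmName + "_" + scopeId;
    }

    public static void main(String[] args) {
        System.out.println(getIgroupName("svm1", 42L)); // -> cs_svm1_42
    }
}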

private AccessGroup createSANAccessGroupRequest(String svmName, String igroupName, Hypervisor.HypervisorType hypervisorType, List<String> hostsIdentifier) {
AccessGroup accessGroupRequest = new AccessGroup();
Igroup igroup = new Igroup();

if (svmName != null && !svmName.isEmpty()) {
Svm svm = new Svm();
svm.setName(svmName);
igroup.setSvm(svm);
}

if (igroupName != null && !igroupName.isEmpty()) {
igroup.setName(igroupName);
}

if (hypervisorType != null) {
String hypervisorName = hypervisorType.name();
igroup.setOsType(Igroup.OsTypeEnum.valueOf(Utility.getOSTypeFromHypervisor(hypervisorName)));
}

if (hostsIdentifier != null && hostsIdentifier.size() > 0) {
List<Initiator> initiators = new ArrayList<>();
for (String hostIdentifier : hostsIdentifier) {
Initiator initiator = new Initiator();
initiator.setName(hostIdentifier);
initiators.add(initiator);
}
igroup.setInitiators(initiators);
}
accessGroupRequest.setIgroup(igroup);
s_logger.debug("createSANAccessGroupRequest: request: " + accessGroupRequest);
return accessGroupRequest;
}
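For orientation, the AccessGroup built above feeds strategy.createAccessGroup(...), which presumably ends up as a POST to the /api/protocols/san/igroups endpoint declared earlier in this diff. The body sketched below follows the usual ONTAP igroup schema and is illustrative only; the exact serialized field names depend on the Jackson annotations of the model classes, which are not shown here.

public class IgroupPayloadSketch {
    public static void main(String[] args) {
        // Approximate request body assembled from the fields set above
        // (svm.name, name, os_type, initiators[].name); all values are illustrative.
        String illustrativeBody = String.join("\n",
                "{",
                "  \"svm\": { \"name\": \"svm1\" },",
                "  \"name\": \"cs_svm1_42\",",
                "  \"os_type\": \"linux\",",
                "  \"initiators\": [ { \"name\": \"iqn.1993-08.org.debian:01:4e5f6a7b8c9d\" } ]",
                "}");
        System.out.println(illustrativeBody);
    }
}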

@Override
public boolean maintain(DataStore store) {
return true;
@@ -325,4 +466,3 @@ public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope cluste

}
}
