diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml
index 10ca7935f408..3afb723b5408 100644
--- a/plugins/storage/volume/ontap/pom.xml
+++ b/plugins/storage/volume/ontap/pom.xml
@@ -31,7 +31,7 @@
2021.0.7
11.0
20230227
- 2.15.2
+ 2.13.4
4.5.14
1.6.2
3.8.1
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
index e2eb6220230a..beb1b95578a5 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
@@ -277,7 +277,7 @@ private StorageStrategy getStrategyByStoragePoolDetails(Map deta
}
String protocol = details.get(Constants.PROTOCOL);
OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD),
- details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), ProtocolType.valueOf(protocol),
+ details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), Long.parseLong(details.get(Constants.SIZE)), ProtocolType.valueOf(protocol),
Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED)));
StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
boolean isValid = storageStrategy.connect();
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
index ce2783add228..79d28a6075be 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
@@ -1,5 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
package org.apache.cloudstack.storage.feign;
+import com.fasterxml.jackson.databind.ObjectMapper;
import feign.RequestInterceptor;
import feign.Retryer;
import feign.Client;
@@ -11,7 +31,6 @@
import feign.codec.EncodeException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.json.JsonMapper;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
@@ -36,13 +55,11 @@ public class FeignConfiguration {
private final int retryMaxInterval = 5;
private final String ontapFeignMaxConnection = "80";
private final String ontapFeignMaxConnectionPerRoute = "20";
- private final JsonMapper jsonMapper;
+ private final ObjectMapper objectMapper;
public FeignConfiguration() {
- this.jsonMapper = JsonMapper.builder()
- .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES)
- .findAndAddModules()
- .build();
+ this.objectMapper = new ObjectMapper();
+ this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
public Client createClient() {
@@ -105,7 +122,7 @@ public void encode(Object object, Type bodyType, feign.RequestTemplate template)
return;
}
try {
- byte[] jsonBytes = jsonMapper.writeValueAsBytes(object);
+ byte[] jsonBytes = objectMapper.writeValueAsBytes(object);
template.body(jsonBytes, StandardCharsets.UTF_8);
template.header("Content-Type", "application/json");
} catch (JsonProcessingException e) {
@@ -126,7 +143,7 @@ public Object decode(Response response, Type type) throws IOException, DecodeExc
try (InputStream bodyStream = response.body().asInputStream()) {
json = new String(bodyStream.readAllBytes(), StandardCharsets.UTF_8);
logger.debug("Decoding JSON response: {}", json);
- return jsonMapper.readValue(json, jsonMapper.getTypeFactory().constructType(type));
+ return objectMapper.readValue(json, objectMapper.getTypeFactory().constructType(type));
} catch (IOException e) {
logger.error("Error decoding JSON response. Status: {}, Raw body: {}", response.status(), json, e);
throw new DecodeException(response.status(), "Error decoding JSON response", response.request(), e);
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java
new file mode 100644
index 000000000000..4acbbecf6573
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java
@@ -0,0 +1,16 @@
+package org.apache.cloudstack.storage.feign.client;
+
+import feign.Headers;
+import feign.Param;
+import feign.QueryMap;
+import feign.RequestLine;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+
+import java.util.Map;
+
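+// Feign client for the ONTAP REST endpoint /api/network/ip/interfaces, used to discover data LIF addresses for an SVM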
+public interface NetworkFeignClient {
+ @RequestLine("GET /api/network/ip/interfaces")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse<IpInterface> getNetworkIpInterfaces(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryParams);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
index 9a2c76639221..717409664662 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
@@ -18,22 +18,30 @@
*/
package org.apache.cloudstack.storage.feign.client;
+import feign.QueryMap;
import org.apache.cloudstack.storage.feign.model.Volume;
import org.apache.cloudstack.storage.feign.model.response.JobResponse;
import feign.Headers;
import feign.Param;
import feign.RequestLine;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+
+import java.util.Map;
public interface VolumeFeignClient {
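+ // Volume deletion is asynchronous in ONTAP; the returned JobResponse carries the job UUID that callers poll for completion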
@RequestLine("DELETE /api/storage/volumes/{uuid}")
@Headers({"Authorization: {authHeader}"})
- void deleteVolume(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+ JobResponse deleteVolume(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
@RequestLine("POST /api/storage/volumes")
@Headers({"Authorization: {authHeader}"})
JobResponse createVolumeWithJob(@Param("authHeader") String authHeader, Volume volumeRequest);
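+ // Lists volumes matching the supplied query parameters (for example a name filter); used to verify a volume after creation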
+ @RequestLine("GET /api/storage/volumes")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse<Volume> getAllVolumes(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryParams);
+
@RequestLine("GET /api/storage/volumes/{uuid}")
@Headers({"Authorization: {authHeader}"})
Volume getVolumeByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
@@ -42,4 +50,3 @@ public interface VolumeFeignClient {
@Headers({"Accept: {acceptHeader}", "Authorization: {authHeader}"})
JobResponse updateVolumeRebalancing(@Param("acceptHeader") String acceptHeader, @Param("uuid") String uuid, Volume volumeRequest);
}
-
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java
index c91f0f87eb27..8ac1717604a5 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java
@@ -22,12 +22,43 @@
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonValue;
import java.util.Objects;
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Aggregate {
+ // Aggregate state reported by ONTAP; values are mapped case-insensitively
+ public enum StateEnum {
+ ONLINE("online");
+ private final String value;
+
+ StateEnum(String value) {
+ this.value = value;
+ }
+
+ @JsonValue
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+ @JsonCreator
+ public static StateEnum fromValue(String text) {
+ for (StateEnum b : StateEnum.values()) {
+ if (String.valueOf(b.value).equalsIgnoreCase(text)) {
+ return b;
+ }
+ }
+ return null;
+ }
+ }
@JsonProperty("name")
private String name = null;
@@ -40,6 +71,13 @@ public int hashCode() {
@JsonProperty("uuid")
private String uuid = null;
+ @JsonProperty("state")
+ private StateEnum state = null;
+
+ @JsonProperty("space")
+ private AggregateSpace space = null;
+
+
public Aggregate name(String name) {
this.name = name;
return this;
@@ -65,6 +103,21 @@ public void setUuid(String uuid) {
this.uuid = uuid;
}
+ public StateEnum getState() {
+ return state;
+ }
+
+ public AggregateSpace getSpace() {
+ return space;
+ }
+
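+ // Convenience accessor for space.block_storage.available; returns null when space details are absent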
+ public Double getAvailableBlockStorageSpace() {
+ if (space != null && space.blockStorage != null) {
+ return space.blockStorage.available;
+ }
+ return null;
+ }
+
@Override
public boolean equals(java.lang.Object o) {
@@ -95,4 +148,18 @@ public String toString() {
return "DiskAggregates [name=" + name + ", uuid=" + uuid + "]";
}
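+ // Minimal mappings of the "space" and "space.block_storage" sections of the ONTAP aggregate response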
+ public static class AggregateSpace {
+ @JsonProperty("block_storage")
+ private AggregateSpaceBlockStorage blockStorage = null;
+ }
+
+ public static class AggregateSpaceBlockStorage {
+ @JsonProperty("available")
+ private Double available = null;
+ @JsonProperty("size")
+ private Double size = null;
+ @JsonProperty("used")
+ private Double used = null;
+ }
+
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IpInterface.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IpInterface.java
new file mode 100644
index 000000000000..c15798a42b70
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IpInterface.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.List;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class IpInterface {
+ @JsonProperty("uuid")
+ private String uuid;
+
+ @JsonProperty("name")
+ private String name;
+
+ @JsonProperty("ip")
+ private IpInfo ip;
+
+ @JsonProperty("svm")
+ private Svm svm;
+
+ @JsonProperty("services")
+ private List<String> services;
+
+ // Getters and setters
+ public String getUuid() {
+ return uuid;
+ }
+
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public IpInfo getIp() {
+ return ip;
+ }
+
+ public void setIp(IpInfo ip) {
+ this.ip = ip;
+ }
+
+ public Svm getSvm() {
+ return svm;
+ }
+
+ public void setSvm(Svm svm) {
+ this.svm = svm;
+ }
+
+ public List<String> getServices() {
+ return services;
+ }
+
+ public void setServices(List<String> services) {
+ this.services = services;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ IpInterface that = (IpInterface) o;
+ return Objects.equals(uuid, that.uuid) &&
+ Objects.equals(name, that.name) &&
+ Objects.equals(ip, that.ip) &&
+ Objects.equals(svm, that.svm) &&
+ Objects.equals(services, that.services);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(uuid, name, ip, svm, services);
+ }
+
+ @Override
+ public String toString() {
+ return "IpInterface{" +
+ "uuid='" + uuid + '\'' +
+ ", name='" + name + '\'' +
+ ", ip=" + ip +
+ ", svm=" + svm +
+ ", services=" + services +
+ '}';
+ }
+
+ // Nested class for IP information
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ public static class IpInfo {
+ @JsonProperty("address")
+ private String address;
+
+ public String getAddress() {
+ return address;
+ }
+
+ public void setAddress(String address) {
+ this.address = address;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ IpInfo ipInfo = (IpInfo) o;
+ return Objects.equals(address, ipInfo.address);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(address);
+ }
+
+ @Override
+ public String toString() {
+ return "IpInfo{" +
+ "address='" + address + '\'' +
+ '}';
+ }
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java
index 04b5611a8dab..cdeaf2ed8388 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java
@@ -87,14 +87,14 @@ public String toString() {
}
public static class Links {
- @JsonProperty("message")
+ @JsonProperty("self")
private Self self;
public Self getSelf() { return self; }
public void setSelf(Self self) { this.self = self; }
}
public static class Self {
- @JsonProperty("message")
+ @JsonProperty("href")
private String href;
public String getHref() { return href; }
public void setHref(String href) { this.href = href; }
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java
index eb56b4a5d5e5..8b450331b50a 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java
@@ -26,14 +26,16 @@ public class OntapStorage {
private final String password;
private final String managementLIF;
private final String svmName;
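+ // Requested capacity of the backing ONTAP volume, in bytes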
+ private final Long size;
private final ProtocolType protocolType;
private final Boolean isDisaggregated;
- public OntapStorage(String username, String password, String managementLIF, String svmName, ProtocolType protocolType, Boolean isDisaggregated) {
+ public OntapStorage(String username, String password, String managementLIF, String svmName, Long size, ProtocolType protocolType, Boolean isDisaggregated) {
this.username = username;
this.password = password;
this.managementLIF = managementLIF;
this.svmName = svmName;
+ this.size = size;
this.protocolType = protocolType;
this.isDisaggregated = isDisaggregated;
}
@@ -54,6 +56,10 @@ public String getSvmName() {
return svmName;
}
+ public Long getSize() {
+ return size;
+ }
+
public ProtocolType getProtocol() {
return protocolType;
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
index f1a226739365..65821739f1b2 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
@@ -144,4 +144,4 @@ public int hashCode() {
@JsonInclude(JsonInclude.Include.NON_NULL)
public static class Links { }
-}
\ No newline at end of file
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
index 01b013f606dd..087a1ed69146 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
@@ -29,6 +29,7 @@
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
import com.cloud.utils.exception.CloudRuntimeException;
import com.google.common.base.Preconditions;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
@@ -38,8 +39,10 @@
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao;
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Volume;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.service.model.ProtocolType;
@@ -59,6 +62,8 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
@Inject private StorageManager _storageMgr;
@Inject private ResourceManager _resourceMgr;
@Inject private PrimaryDataStoreHelper _dataStoreHelper;
+ @Inject private PrimaryDataStoreDetailsDao _datastoreDetailsDao;
+ @Inject private StoragePoolAutomation _storagePoolAutomation;
private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
// ONTAP minimum volume size is 1.56 GB (1677721600 bytes)
@@ -186,12 +191,12 @@ public DataStore initialize(Map dsInfos) {
switch (protocol) {
case NFS3:
parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
- path = details.get(Constants.MANAGEMENT_LIF) + ":/" + storagePoolName;
+ path = Constants.PATH_SEPARATOR + storagePoolName;
s_logger.info("Setting NFS path for storage pool: " + path);
break;
case ISCSI:
parameters.setType(Storage.StoragePoolType.Iscsi);
- path = "iqn.1992-08.com.netapp:" + details.get(Constants.SVM_NAME) + "." + storagePoolName;
+ path = Constants.PATH_SEPARATOR;
s_logger.info("Setting iSCSI path for storage pool: " + path);
break;
default:
@@ -199,30 +204,42 @@ public DataStore initialize(Map dsInfos) {
}
// Connect to ONTAP and create volume
+ long volumeSize = Long.parseLong(details.get(Constants.SIZE));
OntapStorage ontapStorage = new OntapStorage(
details.get(Constants.USERNAME),
details.get(Constants.PASSWORD),
details.get(Constants.MANAGEMENT_LIF),
details.get(Constants.SVM_NAME),
+ volumeSize,
protocol,
Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED).toLowerCase()));
StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
boolean isValid = storageStrategy.connect();
if (isValid) {
- long volumeSize = Long.parseLong(details.get(Constants.SIZE));
s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" +
(volumeSize / (1024 * 1024 * 1024)) + " GB)");
- storageStrategy.createStorageVolume(storagePoolName, volumeSize);
+ Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize);
+ s_logger.info("ONTAP volume created successfully: " + volume.getName());
+ details.put(Constants.VOLUME_NAME, volume.getName());
+ details.put(Constants.VOLUME_UUID, volume.getUuid());
} else {
throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");
}
+ // Get the DataLIF for data access
+ String dataLIF = storageStrategy.getNetworkInterface();
+ if (dataLIF == null || dataLIF.isEmpty()) {
+ throw new CloudRuntimeException("Failed to retrieve Data LIF from ONTAP, cannot create primary storage");
+ }
+ s_logger.info("Using Data LIF for storage access: " + dataLIF);
+ details.put(Constants.DATA_LIF, dataLIF);
+
// Set parameters for primary data store
- parameters.setHost(details.get(Constants.MANAGEMENT_LIF));
+ parameters.setHost(details.get(Constants.DATA_LIF));
parameters.setPort(Constants.ONTAP_PORT);
parameters.setPath(path);
- parameters.setTags(tags != null ? tags : "");
+ parameters.setTags(tags);
parameters.setIsTagARule(isTagARule != null ? isTagARule : Boolean.FALSE);
parameters.setDetails(details);
parameters.setUuid(UUID.randomUUID().toString());
@@ -282,17 +299,49 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper
@Override
public boolean maintain(DataStore store) {
- return true;
+ _storagePoolAutomation.maintain(store);
+ return _dataStoreHelper.maintain(store);
}
@Override
public boolean cancelMaintain(DataStore store) {
- return true;
+ _storagePoolAutomation.cancelMaintain(store);
+ return _dataStoreHelper.cancelMaintain(store);
}
@Override
public boolean deleteDataStore(DataStore store) {
- return true;
+ // Deletion of underlying ONTAP volume
+ long storagePoolId = store.getId();
+ // Get the StoragePool details
+ StoragePool storagePool = _storageMgr.getStoragePool(storagePoolId);
+ if (storagePool == null) {
+ s_logger.warn("Storage pool not found for id: " + storagePoolId + ", cannot delete underlying ONTAP volume");
+ return false; // TODO: since the CloudStack entity is not present, should we return true here?
+ }
+ Map<String, String> details = _datastoreDetailsDao.listDetailsKeyPairs(storagePoolId);
+ // Set the Volume object for deletion
+ Volume volume = new Volume();
+ volume.setName(details.get(Constants.VOLUME_NAME));
+ volume.setUuid(details.get(Constants.VOLUME_UUID));
+ // Call Volume deletion through StorageStrategy
+ OntapStorage ontapStorage = new OntapStorage(
+ details.get(Constants.USERNAME),
+ details.get(Constants.PASSWORD),
+ details.get(Constants.MANAGEMENT_LIF),
+ details.get(Constants.SVM_NAME),
+ Long.parseLong(details.get(Constants.SIZE)),
+ ProtocolType.valueOf(details.get(Constants.PROTOCOL)),
+ Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED))
+ );
+ StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
+ boolean isValid = storageStrategy.connect();
+ if (isValid) {
+ s_logger.info("Deleting ONTAP volume '" + volume.getName() + "' for storage pool id: " + storagePoolId);
+ storageStrategy.deleteStorageVolume(volume);
+ }
+
+ return _dataStoreHelper.deletePrimaryDataStore(store);
}
@Override
@@ -307,12 +356,12 @@ public void updateStoragePool(StoragePool storagePool, Map detai
@Override
public void enableStoragePool(DataStore store) {
-
+ _dataStoreHelper.enable(store);
}
@Override
public void disableStoragePool(DataStore store) {
-
+ _dataStoreHelper.disable(store);
}
@Override
@@ -325,4 +374,3 @@ public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope cluste
}
}
-
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
index 0f9706335784..6da00a92f132 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
@@ -22,10 +22,13 @@
import com.cloud.utils.exception.CloudRuntimeException;
import feign.FeignException;
import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
import org.apache.cloudstack.storage.feign.model.Aggregate;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
import org.apache.cloudstack.storage.feign.model.Job;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.feign.model.Svm;
@@ -39,6 +42,7 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -53,9 +57,11 @@
public abstract class StorageStrategy {
// Replace @Inject Feign clients with FeignClientFactory
private final FeignClientFactory feignClientFactory;
+ private final AggregateFeignClient aggregateFeignClient;
private final VolumeFeignClient volumeFeignClient;
private final SvmFeignClient svmFeignClient;
private final JobFeignClient jobFeignClient;
+ private final NetworkFeignClient networkFeignClient;
protected OntapStorage storage;
@@ -72,9 +78,11 @@ public StorageStrategy(OntapStorage ontapStorage) {
s_logger.info("Initializing StorageStrategy with base URL: " + baseURL);
// Initialize FeignClientFactory and create clients
this.feignClientFactory = new FeignClientFactory();
+ this.aggregateFeignClient = feignClientFactory.createClient(AggregateFeignClient.class, baseURL);
this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
this.svmFeignClient = feignClientFactory.createClient(SvmFeignClient.class, baseURL);
this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL);
+ this.networkFeignClient = feignClientFactory.createClient(NetworkFeignClient.class, baseURL);
}
// Connect method to validate ONTAP cluster, credentials, protocol, and SVM
@@ -110,12 +118,31 @@ public boolean connect() {
s_logger.error("iSCSI protocol is not enabled on SVM " + svmName);
throw new CloudRuntimeException("iSCSI protocol is not enabled on SVM " + svmName);
}
+ // TODO: refine aggregate selection (currently the first online aggregate with enough free space is chosen)
List<Aggregate> aggrs = svm.getAggregates();
if (aggrs == null || aggrs.isEmpty()) {
s_logger.error("No aggregates are assigned to SVM " + svmName);
throw new CloudRuntimeException("No aggregates are assigned to SVM " + svmName);
}
- this.aggregates = aggrs;
+ // Select an aggregate that is online and has enough free space for the requested volume
+ for (Aggregate aggr : aggrs) {
+ s_logger.debug("Found aggregate: " + aggr.getName() + " with UUID: " + aggr.getUuid());
+ Aggregate aggrResp = aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid());
+ if (!Objects.equals(aggrResp.getState(), Aggregate.StateEnum.ONLINE)) {
+ s_logger.warn("Aggregate " + aggr.getName() + " is not in online state. Skipping this aggregate.");
+ continue;
+ } else if (aggrResp.getSpace() == null || aggrResp.getAvailableBlockStorageSpace() == null ||
+ aggrResp.getAvailableBlockStorageSpace() <= storage.getSize().doubleValue()) {
+ s_logger.warn("Aggregate " + aggr.getName() + " does not have sufficient available space. Skipping this aggregate.");
+ continue;
+ }
+ s_logger.info("Selected aggregate: " + aggr.getName() + " for volume operations.");
+ this.aggregates = List.of(aggr);
+ break;
+ }
+ if (this.aggregates == null || this.aggregates.isEmpty()) {
+ s_logger.error("No suitable aggregates found on SVM " + svmName + " for volume creation.");
+ throw new CloudRuntimeException("No suitable aggregates found on SVM " + svmName + " for volume creation.");
+ }
s_logger.info("Successfully connected to ONTAP cluster and validated ONTAP details provided");
} catch (Exception e) {
throw new CloudRuntimeException("Failed to connect to ONTAP cluster: " + e.getMessage(), e);
@@ -131,7 +158,7 @@ public boolean connect() {
* throw exception in case of disaggregated ONTAP storage
*
* @param volumeName the name of the volume to create
- * @param size the size of the volume in bytes
+ * @param size the size of the volume in bytes
* @return the created Volume object
*/
public Volume createStorageVolume(String volumeName, Long size) {
@@ -152,7 +179,10 @@ public Volume createStorageVolume(String volumeName, Long size) {
volumeRequest.setName(volumeName);
volumeRequest.setSvm(svm);
- volumeRequest.setAggregates(aggregates);
+ Aggregate aggr = new Aggregate();
+ aggr.setName(aggregates.get(0).getName());
+ aggr.setUuid(aggregates.get(0).getUuid());
+ volumeRequest.setAggregates(List.of(aggr));
volumeRequest.setSize(size);
// Make the POST API call to create the volume
try {
@@ -165,35 +195,34 @@ public Volume createStorageVolume(String volumeName, Long size) {
String jobUUID = jobResponse.getJob().getUuid();
//Create URI for GET Job API
- int jobRetryCount = 0;
- Job createVolumeJob = null;
- while(createVolumeJob == null || !createVolumeJob.getState().equals(Constants.JOB_SUCCESS)) {
- if(jobRetryCount >= Constants.JOB_MAX_RETRIES) {
- s_logger.error("Job to create volume " + volumeName + " did not complete within expected time.");
- throw new CloudRuntimeException("Job to create volume " + volumeName + " did not complete within expected time.");
- }
-
- try {
- createVolumeJob = jobFeignClient.getJobByUUID(authHeader, jobUUID);
- if (createVolumeJob == null) {
- s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying...");
- } else if (createVolumeJob.getState().equals(Constants.JOB_FAILURE)) {
- throw new CloudRuntimeException("Job to create volume " + volumeName + " failed with error: " + createVolumeJob.getMessage());
- }
- } catch (FeignException.FeignClientException e) {
- throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage());
- }
-
- jobRetryCount++;
- Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // Sleep for 2 seconds before polling again
+ Boolean jobSucceeded = jobPollForSuccess(jobUUID);
+ if (!jobSucceeded) {
+ s_logger.error("Volume creation job failed for volume: " + volumeName);
+ throw new CloudRuntimeException("Volume creation job failed for volume: " + volumeName);
}
+ s_logger.info("Volume creation job completed successfully for volume: " + volumeName);
} catch (Exception e) {
s_logger.error("Exception while creating volume: ", e);
throw new CloudRuntimeException("Failed to create volume: " + e.getMessage());
}
+ // Verify if the Volume has been created and set the Volume object
+ // Call the VolumeFeignClient to get the created volume details
+ OntapResponse<Volume> volumesResponse = volumeFeignClient.getAllVolumes(authHeader, Map.of(Constants.NAME, volumeName));
+ if (volumesResponse == null || volumesResponse.getRecords() == null || volumesResponse.getRecords().isEmpty()) {
+ s_logger.error("Volume " + volumeName + " not found after creation.");
+ throw new CloudRuntimeException("Volume " + volumeName + " not found after creation.");
+ }
+ Volume createdVolume = volumesResponse.getRecords().get(0);
+ if (createdVolume == null) {
+ s_logger.error("Failed to retrieve details of the created volume " + volumeName);
+ throw new CloudRuntimeException("Failed to retrieve details of the created volume " + volumeName);
+ } else if (createdVolume.getName() == null || !createdVolume.getName().equals(volumeName)) {
+ s_logger.error("Mismatch in created volume name. Expected: " + volumeName + ", Found: " + createdVolume.getName());
+ throw new CloudRuntimeException("Mismatch in created volume name. Expected: " + volumeName + ", Found: " + createdVolume.getName());
+ }
s_logger.info("Volume created successfully: " + volumeName);
- //TODO
- return null;
+ // Return the created Volume object
+ return createdVolume;
}
/**
@@ -204,8 +233,7 @@ public Volume createStorageVolume(String volumeName, Long size) {
* @param volume the volume to update
* @return the updated Volume object
*/
- public Volume updateStorageVolume(Volume volume)
- {
+ public Volume updateStorageVolume(Volume volume) {
//TODO
return null;
}
@@ -217,9 +245,24 @@ public Volume updateStorageVolume(Volume volume)
*
* @param volume the volume to delete
*/
- public void deleteStorageVolume(Volume volume)
- {
- //TODO
+ public void deleteStorageVolume(Volume volume) {
+ s_logger.info("Deleting ONTAP volume by name: " + volume.getName() + " and uuid: " + volume.getUuid());
+ // Calling the VolumeFeignClient to delete the volume
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ try {
+ // TODO: Implement lun and file deletion, if any, before deleting the volume
+ JobResponse jobResponse = volumeFeignClient.deleteVolume(authHeader, volume.getUuid());
+ Boolean jobSucceeded = jobPollForSuccess(jobResponse.getJob().getUuid());
+ if (!jobSucceeded) {
+ s_logger.error("Volume deletion job failed for volume: " + volume.getName());
+ throw new CloudRuntimeException("Volume deletion job failed for volume: " + volume.getName());
+ }
+ s_logger.info("Volume deleted successfully: " + volume.getName());
+ } catch (FeignException.FeignClientException e) {
+ s_logger.error("Exception while deleting volume: ", e);
+ throw new CloudRuntimeException("Failed to delete volume: " + e.getMessage());
+ }
+ s_logger.info("ONTAP volume deletion process completed for volume: " + volume.getName());
}
/**
@@ -230,18 +273,62 @@ public void deleteStorageVolume(Volume volume)
* @param volume the volume to retrieve
* @return the retrieved Volume object
*/
- public Volume getStorageVolume(Volume volume)
- {
+ public Volume getStorageVolume(Volume volume) {
//TODO
return null;
}
+ /**
+ * Looks up a data LIF on the SVM that serves the configured protocol.
+ *
+ * @return the IP address of the first matching network interface
+ */
+ public String getNetworkInterface() {
+ // Feign call to get network interfaces
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ try {
+ Map<String, String> queryParams = new HashMap<>();
+ queryParams.put(Constants.SVMDOTNAME, storage.getSvmName());
+ if (storage.getProtocol() != null) {
+ switch (storage.getProtocol()) {
+ case NFS3:
+ queryParams.put(Constants.SERVICES, Constants.DATA_NFS);
+ break;
+ case ISCSI:
+ queryParams.put(Constants.SERVICES, Constants.DATA_ISCSI);
+ break;
+ default:
+ s_logger.error("Unsupported protocol: " + storage.getProtocol());
+ throw new CloudRuntimeException("Unsupported protocol: " + storage.getProtocol());
+ }
+ }
+ queryParams.put(Constants.FIELDS, Constants.IP_ADDRESS);
+ queryParams.put(Constants.RETURN_RECORDS, Constants.TRUE);
+ OntapResponse<IpInterface> response =
+ networkFeignClient.getNetworkIpInterfaces(authHeader, queryParams);
+ if (response != null && response.getRecords() != null && !response.getRecords().isEmpty()) {
+ // For simplicity, return the first interface's IP address
+ IpInterface ipInterface = response.getRecords().get(0);
+ s_logger.info("Retrieved network interface: " + ipInterface.getIp().getAddress());
+ return ipInterface.getIp().getAddress();
+ } else {
+ throw new CloudRuntimeException("No network interfaces found for SVM " + storage.getSvmName() +
+ " for protocol " + storage.getProtocol());
+ }
+ } catch (FeignException.FeignClientException e) {
+ s_logger.error("Exception while retrieving network interfaces: ", e);
+ throw new CloudRuntimeException("Failed to retrieve network interfaces: " + e.getMessage());
+ }
+ }
+
/**
* Method encapsulates the behavior based on the opted protocol in subclasses.
* it is going to mimic
- * createLun for iSCSI, FC protocols
- * createFile for NFS3.0 and NFS4.1 protocols
- * createNameSpace for Nvme/TCP and Nvme/FC protocol
+ * createLun for iSCSI, FC protocols
+ * createFile for NFS3.0 and NFS4.1 protocols
+ * createNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
* @param cloudstackVolume the CloudStack volume to create
* @return the created CloudStackVolume object
*/
@@ -250,9 +337,10 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses.
* it is going to mimic
- * updateLun for iSCSI, FC protocols
- * updateFile for NFS3.0 and NFS4.1 protocols
- * updateNameSpace for Nvme/TCP and Nvme/FC protocol
+ * updateLun for iSCSI, FC protocols
+ * updateFile for NFS3.0 and NFS4.1 protocols
+ * updateNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
* @param cloudstackVolume the CloudStack volume to update
* @return the updated CloudStackVolume object
*/
@@ -261,9 +349,10 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses.
* it is going to mimic
- * deleteLun for iSCSI, FC protocols
- * deleteFile for NFS3.0 and NFS4.1 protocols
- * deleteNameSpace for Nvme/TCP and Nvme/FC protocol
+ * deleteLun for iSCSI, FC protocols
+ * deleteFile for NFS3.0 and NFS4.1 protocols
+ * deleteNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
* @param cloudstackVolume the CloudStack volume to delete
*/
abstract void deleteCloudStackVolume(CloudStackVolume cloudstackVolume);
@@ -271,9 +360,10 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses.
* it is going to mimic
- * getLun for iSCSI, FC protocols
- * getFile for NFS3.0 and NFS4.1 protocols
- * getNameSpace for Nvme/TCP and Nvme/FC protocol
+ * getLun for iSCSI, FC protocols
+ * getFile for NFS3.0 and NFS4.1 protocols
+ * getNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
* @param cloudstackVolume the CloudStack volume to retrieve
* @return the retrieved CloudStackVolume object
*/
@@ -281,9 +371,10 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * createiGroup for iSCSI and FC protocols
- * createExportPolicy for NFS 3.0 and NFS 4.1 protocols
- * createSubsystem for Nvme/TCP and Nvme/FC protocols
+ * createiGroup for iSCSI and FC protocols
+ * createExportPolicy for NFS 3.0 and NFS 4.1 protocols
+ * createSubsystem for Nvme/TCP and Nvme/FC protocols
+ *
* @param accessGroup the access group to create
* @return the created AccessGroup object
*/
@@ -291,18 +382,20 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * deleteiGroup for iSCSI and FC protocols
- * deleteExportPolicy for NFS 3.0 and NFS 4.1 protocols
- * deleteSubsystem for Nvme/TCP and Nvme/FC protocols
+ * deleteiGroup for iSCSI and FC protocols
+ * deleteExportPolicy for NFS 3.0 and NFS 4.1 protocols
+ * deleteSubsystem for Nvme/TCP and Nvme/FC protocols
+ *
* @param accessGroup the access group to delete
*/
abstract void deleteAccessGroup(AccessGroup accessGroup);
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * updateiGroup example add/remove-Iqn for iSCSI and FC protocols
- * updateExportPolicy example add/remove-Rule for NFS 3.0 and NFS 4.1 protocols
- * //TODO for Nvme/TCP and Nvme/FC protocols
+ * updateiGroup example add/remove-Iqn for iSCSI and FC protocols
+ * updateExportPolicy example add/remove-Rule for NFS 3.0 and NFS 4.1 protocols
+ * //TODO for Nvme/TCP and Nvme/FC protocols
+ *
* @param accessGroup the access group to update
* @return the updated AccessGroup object
*/
@@ -310,9 +403,10 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * getiGroup for iSCSI and FC protocols
- * getExportPolicy for NFS 3.0 and NFS 4.1 protocols
- * getNameSpace for Nvme/TCP and Nvme/FC protocols
+ * getiGroup for iSCSI and FC protocols
+ * getExportPolicy for NFS 3.0 and NFS 4.1 protocols
+ * getNameSpace for Nvme/TCP and Nvme/FC protocols
+ *
* @param accessGroup the access group to retrieve
* @return the retrieved AccessGroup object
*/
@@ -320,17 +414,56 @@ public Volume getStorageVolume(Volume volume)
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * lunMap for iSCSI and FC protocols
- * //TODO for Nvme/TCP and Nvme/FC protocols
+ * lunMap for iSCSI and FC protocols
+ * //TODO for Nvme/TCP and Nvme/FC protocols
+ *
* @param values
*/
- abstract void enableLogicalAccess(Map values);
+ abstract void enableLogicalAccess(Map values);
/**
* Method encapsulates the behavior based on the opted protocol in subclasses
- * lunUnmap for iSCSI and FC protocols
- * //TODO for Nvme/TCP and Nvme/FC protocols
+ * lunUnmap for iSCSI and FC protocols
+ * //TODO for Nvme/TCP and Nvme/FC protocols
+ *
* @param values
*/
- abstract void disableLogicalAccess(Map values);
+ abstract void disableLogicalAccess(Map values);
+
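+ /**
+ * Polls the ONTAP job identified by jobUUID until it reaches the success state.
+ * Throws a CloudRuntimeException if the job fails or does not complete within the retry limit.
+ */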
+ private Boolean jobPollForSuccess(String jobUUID) {
+ // Poll the ONTAP job status until it reaches a terminal state or the retry limit is hit
+ int jobRetryCount = 0;
+ Job jobResp = null;
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ while (jobResp == null || !jobResp.getState().equals(Constants.JOB_SUCCESS)) {
+ if (jobRetryCount >= Constants.JOB_MAX_RETRIES) {
+ s_logger.error("Job did not complete within expected time.");
+ throw new CloudRuntimeException("Job did not complete within expected time.");
+ }
+
+ try {
+ jobResp = jobFeignClient.getJobByUUID(authHeader, jobUUID);
+ if (jobResp == null) {
+ s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying...");
+ } else if (jobResp.getState().equals(Constants.JOB_FAILURE)) {
+ throw new CloudRuntimeException("Job failed with error: " + jobResp.getMessage());
+ }
+ } catch (FeignException.FeignClientException e) {
+ throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage());
+ }
+
+ jobRetryCount++;
+ Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // Wait before polling the job status again
+ }
+ if (jobResp == null || !jobResp.getState().equals(Constants.JOB_SUCCESS)) {
+ return false;
+ }
+ } catch (FeignException.FeignClientException e) {
+ throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage());
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new CloudRuntimeException("Interrupted while waiting for ONTAP job " + jobUUID + " to complete", e);
+ }
+ return true;
+ }
}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
index b58e8484cd48..cceccafc2876 100644
--- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
@@ -19,6 +19,7 @@
package org.apache.cloudstack.storage.utils;
+
public class Constants {
public static final String NFS = "nfs";
@@ -28,7 +29,10 @@ public class Constants {
public static final String SVM_NAME = "svmName";
public static final String USERNAME = "username";
public static final String PASSWORD = "password";
+ public static final String DATA_LIF = "dataLIF";
public static final String MANAGEMENT_LIF = "managementLIF";
+ public static final String VOLUME_NAME = "volumeName";
+ public static final String VOLUME_UUID = "volumeUUID";
public static final String IS_DISAGGREGATED = "isDisaggregated";
public static final String RUNNING = "running";
@@ -40,11 +44,20 @@ public class Constants {
public static final String JOB_FAILURE = "failure";
public static final String JOB_SUCCESS = "success";
+ public static final String TRUE = "true";
+ public static final String FALSE = "false";
+
// Query params
public static final String NAME = "name";
public static final String FIELDS = "fields";
public static final String AGGREGATES = "aggregates";
public static final String STATE = "state";
+ public static final String SVMDOTNAME = "svm.name";
+ public static final String DATA_NFS = "data_nfs";
+ public static final String DATA_ISCSI = "data_iscsi";
+ public static final String IP_ADDRESS = "ip.address";
+ public static final String SERVICES = "services";
+ public static final String RETURN_RECORDS = "return_records";
public static final int JOB_MAX_RETRIES = 100;
public static final int CREATE_VOLUME_CHECK_SLEEP_TIME = 2000;