Skip to content

Commit ddb119f

Browse files
Resolve conflicts originating from the rebase
1 parent 0f5370a commit ddb119f

15 files changed

Lines changed: 657 additions & 551 deletions

plugins/storage/volume/ontap/pom.xml

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
<junit-jupiter.version>5.8.1</junit-jupiter.version>
4040
<mockito.version>3.12.4</mockito.version>
4141
<mockito-junit-jupiter.version>5.2.0</mockito-junit-jupiter.version>
42+
<byte-buddy-agent.version>1.11.13</byte-buddy-agent.version>
4243
</properties>
4344
<dependencyManagement>
4445
<dependencies>
@@ -121,12 +122,24 @@
121122
<version>${mockito.version}</version>
122123
<scope>test</scope>
123124
</dependency>
125+
<dependency>
126+
<groupId>net.bytebuddy</groupId>
127+
<artifactId>byte-buddy-agent</artifactId>
128+
<version>${byte-buddy-agent.version}</version>
129+
<scope>test</scope>
130+
</dependency>
124131
<dependency>
125132
<groupId>org.assertj</groupId>
126133
<artifactId>assertj-core</artifactId>
127134
<version>${assertj.version}</version>
128135
<scope>test</scope>
129136
</dependency>
137+
<dependency>
138+
<groupId>org.apache.cloudstack</groupId>
139+
<artifactId>cloud-engine-storage-snapshot</artifactId>
140+
<version>4.23.0.0-SNAPSHOT</version>
141+
<scope>compile</scope>
142+
</dependency>
130143
</dependencies>
131144
<repositories>
132145
<repository>
@@ -151,6 +164,7 @@
151164
<version>${maven-surefire-plugin.version}</version>
152165
<configuration>
153166
<skipTests>false</skipTests>
167+
<argLine>-javaagent:${settings.localRepository}/net/bytebuddy/byte-buddy-agent/${byte-buddy-agent.version}/byte-buddy-agent-${byte-buddy-agent.version}.jar</argLine>
154168
<includes>
155169
<include>**/*Test.java</include>
156170
</includes>

plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java

Lines changed: 96 additions & 96 deletions
Large diffs are not rendered by default.

plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java

Lines changed: 33 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,6 @@
3131
import com.cloud.storage.StorageManager;
3232
import com.cloud.storage.StoragePool;
3333
import com.cloud.storage.StoragePoolAutomation;
34-
import com.cloud.utils.StringUtils;
3534
import com.cloud.utils.exception.CloudRuntimeException;
3635
import com.google.common.base.Preconditions;
3736
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
@@ -60,7 +59,6 @@
6059

6160
import javax.inject.Inject;
6261
import java.util.ArrayList;
63-
import java.util.HashSet;
6462
import java.util.List;
6563
import java.util.Map;
6664
import java.util.Set;
@@ -107,7 +105,7 @@ public DataStore initialize(Map<String, Object> dsInfos) {
107105
@SuppressWarnings("unchecked")
108106
Map<String, String> details = (Map<String, String>) dsInfos.get("details");
109107

110-
capacityBytes = validateInitializeInputs(capacityBytes, podId, clusterId, zoneId, storagePoolName, providerName, managed, url, details);
108+
capacityBytes = validateInitializeInputs(capacityBytes, podId, clusterId, zoneId, storagePoolName, providerName, managed, details);
111109

112110
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
113111
if (clusterId != null) {
@@ -128,11 +126,10 @@ public DataStore initialize(Map<String, Object> dsInfos) {
128126
OntapStorage ontapStorage = new OntapStorage(
129127
details.get(OntapStorageConstants.USERNAME),
130128
details.get(OntapStorageConstants.PASSWORD),
131-
details.get(OntapStorageConstants.MANAGEMENT_LIF),
129+
details.get(OntapStorageConstants.STORAGE_IP),
132130
details.get(OntapStorageConstants.SVM_NAME),
133131
capacityBytes,
134-
protocol,
135-
Boolean.parseBoolean(details.get(OntapStorageConstants.IS_DISAGGREGATED).toLowerCase()));
132+
protocol);
136133

137134
StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
138135
boolean isValid = storageStrategy.connect();
@@ -172,7 +169,7 @@ public DataStore initialize(Map<String, Object> dsInfos) {
172169
path = OntapStorageConstants.SLASH + storagePoolName;
173170
port = OntapStorageConstants.NFS3_PORT;
174171
// Force NFSv3 for ONTAP managed storage to avoid NFSv4 ID mapping issues
175-
details.put(OntapStorageConstants.NFS_MOUNT_OPTIONS,Constants.NFS3_MOUNT_OPTIONS_VER_3);
172+
details.put(OntapStorageConstants.NFS_MOUNT_OPTIONS, OntapStorageConstants.NFS3_MOUNT_OPTIONS_VER_3);
176173
logger.info("Setting NFS path for storage pool: " + path + ", port: " + port + " with mount option: vers=3");
177174
break;
178175
case ISCSI:
@@ -205,9 +202,9 @@ public DataStore initialize(Map<String, Object> dsInfos) {
205202
}
206203

207204
private long validateInitializeInputs(Long capacityBytes, Long podId, Long clusterId, Long zoneId,
208-
String storagePoolName, String providerName, boolean managed, String url, Map<String, String> details) {
205+
String storagePoolName, String providerName, boolean managed, Map<String, String> details) {
209206

210-
// Capacity validation
207+
// Validate and set capacity
211208
if (capacityBytes == null || capacityBytes <= 0) {
212209
logger.warn("capacityBytes not provided or invalid (" + capacityBytes + "), using ONTAP minimum size: " + ONTAP_MIN_VOLUME_SIZE_IN_BYTES);
213210
capacityBytes = ONTAP_MIN_VOLUME_SIZE_IN_BYTES;
@@ -216,70 +213,67 @@ private long validateInitializeInputs(Long capacityBytes, Long podId, Long clust
216213
capacityBytes = ONTAP_MIN_VOLUME_SIZE_IN_BYTES;
217214
}
218215

219-
// Scope (pod/cluster/zone) validation
216+
// Validate scope
220217
if (podId == null ^ clusterId == null) {
221218
throw new CloudRuntimeException("Cluster Id or Pod Id is null, cannot create primary storage");
222219
}
223-
if (podId == null && clusterId == null) {
220+
221+
if (podId == null) {
224222
if (zoneId != null) {
225223
logger.info("Both Pod Id and Cluster Id are null, Primary storage pool will be associated with a Zone");
226224
} else {
227225
throw new CloudRuntimeException("Pod Id, Cluster Id and Zone Id are all null, cannot create primary storage");
228226
}
229227
}
230228

231-
// Basic parameter validation
232-
if (StringUtils.isBlank(storagePoolName)) {
229+
if (storagePoolName == null || storagePoolName.isEmpty()) {
233230
throw new CloudRuntimeException("Storage pool name is null or empty, cannot create primary storage");
234231
}
235-
if (StringUtils.isBlank(providerName)) {
232+
233+
if (providerName == null || providerName.isEmpty()) {
236234
throw new CloudRuntimeException("Provider name is null or empty, cannot create primary storage");
237235
}
236+
237+
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
238+
if (clusterId != null) {
239+
ClusterVO clusterVO = _clusterDao.findById(clusterId);
240+
Preconditions.checkNotNull(clusterVO, "Unable to locate the specified cluster");
241+
if (clusterVO.getHypervisorType() != Hypervisor.HypervisorType.KVM) {
242+
throw new CloudRuntimeException("ONTAP primary storage is supported only for KVM hypervisor");
243+
}
244+
parameters.setHypervisorType(clusterVO.getHypervisorType());
245+
}
246+
238247
logger.debug("ONTAP primary storage will be created as " + (managed ? "managed" : "unmanaged"));
239248
if (!managed) {
240249
throw new CloudRuntimeException("ONTAP primary storage must be managed");
241250
}
242251

243-
// Details key validation
252+
//Required ONTAP detail keys
244253
Set<String> requiredKeys = Set.of(
245254
OntapStorageConstants.USERNAME,
246255
OntapStorageConstants.PASSWORD,
247256
OntapStorageConstants.SVM_NAME,
248257
OntapStorageConstants.PROTOCOL,
249-
OntapStorageConstants.MANAGEMENT_LIF
258+
OntapStorageConstants.STORAGE_IP
250259
);
251-
Set<String> optionalKeys = Set.of(
252-
OntapStorageConstants.IS_DISAGGREGATED
253-
);
254-
Set<String> allowedKeys = new java.util.HashSet<>(requiredKeys);
255-
allowedKeys.addAll(optionalKeys);
256-
257-
if (StringUtils.isNotBlank(url)) {
258-
for (String segment : url.split(OntapStorageConstants.SEMICOLON)) {
259-
if (segment.isEmpty()) {
260-
continue;
261-
}
262-
String[] kv = segment.split(OntapStorageConstants.EQUALS, 2);
263-
if (kv.length == 2) {
264-
details.put(kv[0].trim(), kv[1].trim());
265-
}
266-
}
267-
}
268260

261+
// Validate existing entries (reject unexpected keys, empty values)
269262
for (Map.Entry<String, String> e : details.entrySet()) {
270263
String key = e.getKey();
271264
String val = e.getValue();
272-
if (!allowedKeys.contains(key)) {
265+
if (!requiredKeys.contains(key)) {
273266
throw new CloudRuntimeException("Unexpected ONTAP detail key in URL: " + key);
274267
}
275-
if (StringUtils.isBlank(val)) {
268+
if (val == null || val.isEmpty()) {
276269
throw new CloudRuntimeException("ONTAP primary storage creation failed, empty detail: " + key);
277270
}
278271
}
279272

280-
Set<String> providedKeys = new HashSet<>(details.keySet());
273+
// Detect missing required keys
274+
Set<String> providedKeys = new java.util.HashSet<>(details.keySet());
281275
if (!providedKeys.containsAll(requiredKeys)) {
282-
Set<String> missing = new HashSet<>(requiredKeys);
276+
Set<String> missing = new java.util.HashSet<>(requiredKeys);
283277
missing.removeAll(providedKeys);
284278
throw new CloudRuntimeException("ONTAP primary storage creation failed, missing detail(s): " + missing);
285279
}
@@ -317,7 +311,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
317311
}
318312
logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId());
319313
// We need to create export policy at pool level and igroup at host level(in grantAccess)
320-
if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
314+
if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) {
321315
// If there are no eligible host, export policy or igroup will not be created and will be taken as part of HostListener
322316
if (!hostsIdentifier.isEmpty()) {
323317
try {
@@ -383,7 +377,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper
383377
}
384378

385379
// We need to create export policy at pool level and igroup at host level
386-
if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
380+
if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(OntapStorageConstants.PROTOCOL))) {
387381
// If there are no eligible host, export policy or igroup will not be created and will be taken as part of HostListener
388382
if (!hostsIdentifier.isEmpty()) {
389383
try {

plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -257,7 +257,7 @@ public Volume createStorageVolume(String volumeName, Long size) {
257257
}
258258
String jobUUID = jobResponse.getJob().getUuid();
259259

260-
Boolean jobSucceeded = jobPollForSuccess(jobUUID);
260+
Boolean jobSucceeded = jobPollForSuccess(jobUUID,OntapStorageConstants.JOB_MAX_RETRIES, OntapStorageConstants.CREATE_VOLUME_CHECK_SLEEP_TIME);
261261
if (!jobSucceeded) {
262262
logger.error("Volume creation job failed for volume: " + volumeName);
263263
throw new CloudRuntimeException("Volume creation job failed for volume: " + volumeName);
@@ -341,7 +341,7 @@ public void deleteStorageVolume(Volume volume) {
341341
try {
342342
// TODO: Implement lun and file deletion, if any, before deleting the volume
343343
JobResponse jobResponse = volumeFeignClient.deleteVolume(authHeader, volume.getUuid());
344-
Boolean jobSucceeded = jobPollForSuccess(jobResponse.getJob().getUuid());
344+
Boolean jobSucceeded = jobPollForSuccess(jobResponse.getJob().getUuid(),OntapStorageConstants.JOB_MAX_RETRIES, OntapStorageConstants.CREATE_VOLUME_CHECK_SLEEP_TIME);
345345
if (!jobSucceeded) {
346346
logger.error("Volume deletion job failed for volume: " + volume.getName());
347347
throw new CloudRuntimeException("Volume deletion job failed for volume: " + volume.getName());

0 commit comments

Comments
 (0)