Skip to content

Commit 25353c2

Browse files
sandeeplocharla and Locharla, Sandeep authored
CSTACKEX-7: ONTAP Primary storage pool (#9)
* CSTACKEX-7: ONTAP Primary storage pool --------- Co-authored-by: Locharla, Sandeep <[email protected]>
1 parent a492797 commit 25353c2

File tree

13 files changed

+752
-14
lines changed

13 files changed

+752
-14
lines changed

plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SvmFeignClient.java

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,10 @@
2626
import org.springframework.web.bind.annotation.RequestHeader;
2727
import org.springframework.web.bind.annotation.RequestMapping;
2828
import org.springframework.web.bind.annotation.RequestMethod;
29+
import org.springframework.web.bind.annotation.RequestParam;
2930

3031
import java.net.URI;
32+
import java.util.Map;
3133

3234
@FeignClient(name = "SvmClient", url = "https://{clusterIP}/api/svm/svms", configuration = FeignConfiguration.class)
3335
public interface SvmFeignClient {

plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -30,21 +30,23 @@
3030
import org.springframework.web.bind.annotation.RequestBody;
3131
import org.springframework.web.bind.annotation.RequestMethod;
3232

33+
import java.net.URI;
34+
3335

3436
@Lazy
3537
@FeignClient(name = "VolumeClient", url = "https://{clusterIP}/api/storage/volumes", configuration = FeignConfiguration.class)
3638
public interface VolumeFeignClient {
3739

3840
@RequestMapping(method = RequestMethod.DELETE, value="/{uuid}")
39-
void deleteVolume(@RequestHeader("Authorization") String authHeader, @PathVariable("uuid") String uuid);
41+
void deleteVolume(URI baseURL, @RequestHeader("Authorization") String authHeader, @PathVariable("uuid") String uuid);
4042

4143
@RequestMapping(method = RequestMethod.POST)
42-
JobResponse createVolumeWithJob(@RequestHeader("Authorization") String authHeader, @RequestBody Volume volumeRequest);
44+
JobResponse createVolumeWithJob(URI baseURL, @RequestHeader("Authorization") String authHeader, @RequestBody Volume volumeRequest);
4345

4446
@RequestMapping(method = RequestMethod.GET, value="/{uuid}")
45-
Volume getVolumeByUUID(@RequestHeader("Authorization") String authHeader, @PathVariable("uuid") String uuid);
47+
Volume getVolumeByUUID(URI baseURL, @RequestHeader("Authorization") String authHeader, @PathVariable("uuid") String uuid);
4648

4749
@RequestMapping(method = RequestMethod.PATCH)
48-
JobResponse updateVolumeRebalancing(@RequestHeader("accept") String acceptHeader, @PathVariable("uuid") String uuid, @RequestBody Volume volumeRequest);
50+
JobResponse updateVolumeRebalancing(URI baseURL, @RequestHeader("accept") String acceptHeader, @PathVariable("uuid") String uuid, @RequestBody Volume volumeRequest);
4951

5052
}
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
/*
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing,
13+
* software distributed under the License is distributed on an
14+
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15+
* KIND, either express or implied. See the License for the
16+
* specific language governing permissions and limitations
17+
* under the License.
18+
*/
19+
20+
package org.apache.cloudstack.storage.feign.model;
21+
22+
import org.apache.cloudstack.storage.utils.Constants.ProtocolType;
23+
24+
public class OntapStorage {
25+
public static String _username;
26+
public static String _password;
27+
public static String _managementLIF;
28+
public static String _svmName;
29+
public static ProtocolType _protocolType;
30+
public static Boolean _isDisaggregated;
31+
32+
public OntapStorage(String username, String password, String managementLIF, String svmName, ProtocolType protocolType, Boolean isDisaggregated) {
33+
_username = username;
34+
_password = password;
35+
_managementLIF = managementLIF;
36+
_svmName = svmName;
37+
_protocolType = protocolType;
38+
_isDisaggregated = isDisaggregated;
39+
}
40+
41+
public String getUsername() {
42+
return _username;
43+
}
44+
45+
public void setUsername(String username) {
46+
_username = username;
47+
}
48+
49+
public String getPassword() {
50+
return _password;
51+
}
52+
53+
public void setPassword(String password) {
54+
_password = password;
55+
}
56+
57+
public String getManagementLIF() {
58+
return _managementLIF;
59+
}
60+
61+
public void setManagementLIF(String managementLIF) {
62+
_managementLIF = managementLIF;
63+
}
64+
65+
public String getSvmName() {
66+
return _svmName;
67+
}
68+
69+
public void setSvmName(String svmName) {
70+
_svmName = svmName;
71+
}
72+
73+
public ProtocolType getProtocol() {
74+
return _protocolType;
75+
}
76+
77+
public void setProtocol(ProtocolType protocolType) {
78+
_protocolType = protocolType;
79+
}
80+
81+
public Boolean getIsDisaggregated() {
82+
return _isDisaggregated;
83+
}
84+
85+
public void setIsDisaggregated(Boolean isDisaggregated) {
86+
_isDisaggregated = isDisaggregated;
87+
}
88+
}

plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/response/OntapResponse.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
import java.util.List;
2525

2626
/**
27-
* OnTapResponse
27+
* OntapResponse
2828
*/
2929
@JsonInclude(JsonInclude.Include.NON_NULL)
3030
public class OntapResponse<T> {
@@ -59,4 +59,4 @@ public void setRecords(List<T> records) {
5959
this.records = records;
6060
this.numRecords = (records != null) ? records.size() : 0;
6161
}
62-
}
62+
}

plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java

Lines changed: 151 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -21,19 +21,43 @@
2121

2222

2323
import com.cloud.agent.api.StoragePoolInfo;
24+
import com.cloud.dc.ClusterVO;
25+
import com.cloud.dc.dao.ClusterDao;
26+
import com.cloud.host.HostVO;
2427
import com.cloud.hypervisor.Hypervisor;
28+
import com.cloud.resource.ResourceManager;
29+
import com.cloud.storage.Storage;
30+
import com.cloud.storage.StorageManager;
2531
import com.cloud.storage.StoragePool;
32+
import com.cloud.utils.exception.CloudRuntimeException;
33+
import com.google.common.base.Preconditions;
2634
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
2735
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
2836
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
37+
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
2938
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
39+
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
3040
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
41+
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
42+
import org.apache.cloudstack.storage.feign.model.OntapStorage;
43+
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
44+
import org.apache.cloudstack.storage.service.StorageStrategy;
45+
import org.apache.cloudstack.storage.utils.Constants;
46+
import org.apache.cloudstack.storage.utils.Constants.ProtocolType;
47+
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
3148
import org.apache.logging.log4j.LogManager;
3249
import org.apache.logging.log4j.Logger;
33-
import java.util.Map;
3450

35-
public class OntapPrimaryDatastoreLifecycle implements PrimaryDataStoreLifeCycle {
51+
import javax.inject.Inject;
52+
import java.util.List;
53+
import java.util.Map;
54+
import java.util.UUID;
3655

56+
public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
57+
@Inject private ClusterDao _clusterDao;
58+
@Inject private StorageManager _storageMgr;
59+
@Inject private ResourceManager _resourceMgr;
60+
@Inject private PrimaryDataStoreHelper _dataStoreHelper;
3761
private static final Logger s_logger = (Logger)LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
3862

3963
/**
@@ -43,14 +67,120 @@ public class OntapPrimaryDatastoreLifecycle implements PrimaryDataStoreLifeCycle
4367
*/
4468
@Override
4569
public DataStore initialize(Map<String, Object> dsInfos) {
46-
47-
return null;
48-
70+
if (dsInfos == null) {
71+
throw new CloudRuntimeException("Datastore info map is null, cannot create primary storage");
72+
}
73+
String url = dsInfos.get("url").toString(); // TODO: Decide on whether should the customer enter just the Management LIF IP or https://ManagementLIF
74+
Long zoneId = dsInfos.get("zoneId").toString().trim().isEmpty() ? null : (Long)dsInfos.get("zoneId");
75+
Long podId = dsInfos.get("podId").toString().trim().isEmpty() ? null : (Long)dsInfos.get("zoneId");
76+
Long clusterId = dsInfos.get("clusterId").toString().trim().isEmpty() ? null : (Long)dsInfos.get("clusterId");
77+
String storagePoolName = dsInfos.get("name").toString().trim();
78+
String providerName = dsInfos.get("providerName").toString().trim();
79+
String tags = dsInfos.get("tags").toString().trim();
80+
Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");
81+
String scheme = dsInfos.get("scheme").toString();
82+
83+
s_logger.info("Creating ONTAP primary storage pool with name: " + storagePoolName + ", provider: " + providerName +
84+
", zoneId: " + zoneId + ", podId: " + podId + ", clusterId: " + clusterId + ", protocol: " + scheme);
85+
86+
// Additional details requested for ONTAP primary storage pool creation
87+
@SuppressWarnings("unchecked")
88+
Map<String, String> details = (Map<String, String>)dsInfos.get("details");
89+
// Validations
90+
if (podId == null ^ clusterId == null) {
91+
throw new CloudRuntimeException("Cluster Id or Pod Id is null, cannot create primary storage");
92+
}
93+
94+
if (podId == null && clusterId == null) {
95+
if (zoneId != null) {
96+
s_logger.info("Both Pod Id and Cluster Id are null, Primary storage pool will be associated with a Zone");
97+
} else {
98+
throw new CloudRuntimeException("Pod Id, Cluster Id and Zone Id are all null, cannot create primary storage");
99+
}
100+
}
101+
102+
if (storagePoolName == null || storagePoolName.isEmpty()) {
103+
throw new CloudRuntimeException("Storage pool name is null or empty, cannot create primary storage");
104+
}
105+
106+
if (providerName == null || providerName.isEmpty()) {
107+
throw new CloudRuntimeException("Provider name is null or empty, cannot create primary storage");
108+
}
109+
110+
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
111+
if (clusterId != null) {
112+
ClusterVO clusterVO = _clusterDao.findById(clusterId);
113+
Preconditions.checkNotNull(clusterVO, "Unable to locate the specified cluster");
114+
if (clusterVO.getHypervisorType() != Hypervisor.HypervisorType.KVM) {
115+
throw new CloudRuntimeException("ONTAP primary storage is supported only for KVM hypervisor");
116+
}
117+
parameters.setHypervisorType(clusterVO.getHypervisorType());
118+
}
119+
120+
// TODO: While testing need to check what does this actually do and if the fields corresponding to each protocol should also be set
121+
// TODO: scheme could be 'custom' in our case and we might have to ask 'protocol' separately to the user
122+
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL).toLowerCase());
123+
switch (protocol) {
124+
case NFS:
125+
parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
126+
break;
127+
case ISCSI:
128+
parameters.setType(Storage.StoragePoolType.Iscsi);
129+
break;
130+
default:
131+
throw new CloudRuntimeException("Unsupported protocol: " + scheme + ", cannot create primary storage");
132+
}
133+
134+
details.put(Constants.MANAGEMENT_LIF, url);
135+
136+
// Validate the ONTAP details
137+
if(details.get(Constants.IS_DISAGGREGATED) == null || details.get(Constants.IS_DISAGGREGATED).isEmpty()) {
138+
details.put(Constants.IS_DISAGGREGATED, "false");
139+
}
140+
141+
OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD),
142+
details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), protocol,
143+
Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED)));
144+
StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
145+
boolean isValid = storageStrategy.connect();
146+
if (isValid) {
147+
// String volumeName = storagePoolName + "_vol"; //TODO: Figure out a better naming convention
148+
storageStrategy.createVolume(storagePoolName, Long.parseLong((details.get("size")))); // TODO: size should be in bytes, so see if conversion is needed
149+
} else {
150+
throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");
151+
}
152+
153+
parameters.setTags(tags);
154+
parameters.setIsTagARule(isTagARule);
155+
parameters.setDetails(details);
156+
parameters.setUuid(UUID.randomUUID().toString());
157+
parameters.setZoneId(zoneId);
158+
parameters.setPodId(podId);
159+
parameters.setClusterId(clusterId);
160+
parameters.setName(storagePoolName);
161+
parameters.setProviderName(providerName);
162+
parameters.setManaged(true);
163+
164+
return _dataStoreHelper.createPrimaryDataStore(parameters);
49165
}
50166

51167
    /**
     * Attaches this primary store to every eligible (up and enabled) host in the
     * pool's cluster, then marks the store as attached via the data store helper.
     * A per-host connection failure is logged and skipped so the remaining hosts
     * still get connected; the method always reports success.
     *
     * @param dataStore the primary store being attached (cast to PrimaryDataStoreInfo)
     * @param scope     cluster scope (unused here; the cluster id comes from the store itself)
     * @return always {@code true} — best-effort host connection, see the catch below
     */
    @Override
    public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
        logger.debug("In attachCluster for ONTAP primary storage");
        PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
        List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);

        logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
        for (HostVO host : hostsToConnect) {
            // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
            try {
                _storageMgr.connectHostToSharedPool(host, dataStore.getId());
            } catch (Exception e) {
                // best-effort: one unreachable host should not abort the whole attach
                logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
            }
        }
        _dataStoreHelper.attachCluster(dataStore);
        return true;
    }
55185

56186
@Override
@@ -60,7 +190,20 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis
60190

61191
@Override
62192
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
63-
return false;
193+
logger.debug("In attachZone for ONTAP primary storage");
194+
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);
195+
196+
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
197+
for (HostVO host : hostsToConnect) {
198+
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
199+
try {
200+
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
201+
} catch (Exception e) {
202+
logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
203+
}
204+
}
205+
_dataStoreHelper.attachZone(dataStore);
206+
return true;
64207
}
65208

66209
@Override

0 commit comments

Comments
 (0)