Skip to content
Closed
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Let's take the latest from Sandeep's PR.

Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,11 @@
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.Host;
import com.cloud.storage.Storage;
import com.cloud.storage.StoragePool;
import com.cloud.storage.ScopeType;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.Volume;
import com.cloud.storage.StoragePool;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
Expand All @@ -45,6 +48,7 @@
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.CloudStackVolume;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
Expand All @@ -63,6 +67,7 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
@Inject private Utility utils;
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
@Inject private PrimaryDataStoreDao storagePoolDao;
@Inject private VolumeDao volumeDao;
@Override
public Map<String, String> getCapabilities() {
s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called");
Expand Down Expand Up @@ -99,10 +104,16 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet
throw new InvalidParameterValueException("createAsync: callback should not be null");
}
try {
s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]",
dataStore, dataObject, dataObject.getType());
s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]", dataStore, dataObject, dataObject.getType());

StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
if(storagePool == null) {
s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
}

if (dataObject.getType() == DataObjectType.VOLUME) {
path = createCloudStackVolumeForTypeVolume(dataStore, dataObject);
path = createCloudStackVolumeForTypeVolume(storagePool, dataObject);
createCmdResult = new CreateCmdResult(path, new Answer(null, true, null));
} else {
errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync";
Expand All @@ -119,13 +130,8 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet
}
}

private String createCloudStackVolumeForTypeVolume(DataStore dataStore, DataObject dataObject) {
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
if(storagePool == null) {
s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId());
}
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
private String createCloudStackVolumeForTypeVolume(StoragePoolVO storagePool, DataObject dataObject) {
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId());
StorageStrategy storageStrategy = utils.getStrategyByStoragePoolDetails(details);
s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME));
CloudStackVolume cloudStackVolumeRequest = utils.createCloudStackVolumeRequestByProtocol(storagePool, details, dataObject);
Expand Down Expand Up @@ -171,9 +177,83 @@ public ChapInfo getChapInfo(DataObject dataObject) {

@Override
/**
 * Grants the given host access to the given data object on this data store.
 * Validates arguments, resolves the storage pool and CloudStack volume rows,
 * and delegates the protocol-specific work to {@link #grantAccessForVolume}.
 *
 * @param dataObject the volume to expose; only {@link DataObjectType#VOLUME} is supported
 * @param host       the host that should gain access
 * @param dataStore  the primary data store backing the volume
 * @return true when access was granted (all failures are thrown, never returned)
 * @throws InvalidParameterValueException when any argument is null
 * @throws CloudRuntimeException on any lookup or grant failure
 */
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
    if (dataStore == null) {
        throw new InvalidParameterValueException("grantAccess: dataStore should not be null");
    }
    if (dataObject == null) {
        throw new InvalidParameterValueException("grantAccess: dataObject should not be null");
    }
    if (host == null) {
        throw new InvalidParameterValueException("grantAccess: host should not be null");
    }
    try {
        StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
        if (storagePool == null) {
            s_logger.error("grantAccess : Storage Pool not found for id: " + dataStore.getId());
            throw new CloudRuntimeException("grantAccess : Storage Pool not found for id: " + dataStore.getId());
        }
        // BUG FIX: the original condition used "!= CLUSTER || != ZONE", which is true for
        // every scope (no scope equals both), so grantAccess always failed. Reject only
        // when the scope is neither CLUSTER nor ZONE.
        if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) {
            s_logger.error("grantAccess: Only Cluster and ZONE scoped primary storage is supported. Storage Pool: " + storagePool.getName());
            throw new CloudRuntimeException("grantAccess: Only Cluster and ZONE scoped primary storage is supported. Storage Pool: " + storagePool.getName());
        }

        VolumeVO volumeVO = volumeDao.findById(dataObject.getId());
        if (volumeVO == null) {
            s_logger.error("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId());
            throw new CloudRuntimeException("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId());
        }

        if (dataObject.getType() == DataObjectType.VOLUME) {
            grantAccessForVolume(storagePool, volumeVO, host);
        } else {
            s_logger.error("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess");
            throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess");
        }
    } catch (Exception e) {
        s_logger.error("grantAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage());
        // Preserve the original exception as the cause instead of discarding it.
        throw new CloudRuntimeException("grantAccess: Failed with error :" + e.getMessage(), e);
    }
    return true;
}

private void grantAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) {
Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId());
StorageStrategy storageStrategy = utils.getStrategyByStoragePoolDetails(details);

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this method has moved from utility class

String svmName = details.get(Constants.SVM_NAME);

if(ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
Map<String, String> getCloudStackVolumeMap = new HashMap<>();
getCloudStackVolumeMap.put(Constants.NAME, volumeVO.getPath());
getCloudStackVolumeMap.put(Constants.SVM_DOT_NAME, svmName);
Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

check for wildcard in values

CloudStackVolume cloudStackVolume = storageStrategy.getCloudStackVolume(getCloudStackVolumeMap);
if(cloudStackVolume == null ||cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getName() == null) {
s_logger.error("grantAccess: Failed to get LUN details [{}]", volumeVO.getName());
throw new CloudRuntimeException("grantAccess: Failed to get LUN [" + volumeVO.getName() + "]");
}

long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId();
String igroupName = utils.getIgroupName(storagePool.getName(), scopeId);
Map<String, String> getAccessGroupMap = new HashMap<>();
getAccessGroupMap.put(Constants.NAME, igroupName);
getAccessGroupMap.put(Constants.SVM_DOT_NAME, svmName);
AccessGroup accessGroup = storageStrategy.getAccessGroup(getAccessGroupMap);
if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getName() == null) {
s_logger.error("grantAccess: Failed to get iGroup details for host [{}]", host.getName());
throw new CloudRuntimeException("grantAccess: Failed to get iGroup details for host [" + host.getName() + "]");
}
if(!accessGroup.getIgroup().getInitiators().contains(host.getStorageUrl())) {
s_logger.error("grantAccess: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), igroupName);
throw new CloudRuntimeException("grantAccess: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + igroupName);
}

Map<String, String> enableLogicalAccessMap = new HashMap<>();
enableLogicalAccessMap.put(Constants.LUN_DOT_NAME, volumeVO.getPath());
enableLogicalAccessMap.put(Constants.SVM_DOT_NAME, svmName);
enableLogicalAccessMap.put(Constants.IGROUP_DOT_NAME, igroupName);
storageStrategy.enableLogicalAccess(enableLogicalAccessMap);
}
}

@Override
public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,21 +65,17 @@ OntapResponse<Igroup> createIgroup(URI uri, @RequestHeader("Authorization") Stri

//this method to get all igroups and also filtered igroups based on query params as a part of URL
@RequestMapping(method = RequestMethod.GET)
OntapResponse<Igroup> getIgroupResponse(URI baseURL, @RequestHeader("Authorization") String header, @PathVariable(name = "uuid", required = true) String uuid);
OntapResponse<Igroup> getIgroupResponse(URI baseURL, @RequestHeader("Authorization") String header);
@RequestMapping(method = RequestMethod.GET, value = "/{uuid}")
Igroup getIgroupByUUID(URI baseURL, @RequestHeader("Authorization") String header, @PathVariable(name = "uuid", required = true) String uuid);
@RequestMapping(method = RequestMethod.DELETE, value = "/{uuid}")
void deleteIgroup(URI baseUri, @RequestHeader("Authorization") String authHeader, @PathVariable(name = "uuid", required = true) String uuid);

@RequestMapping(method = RequestMethod.POST, value = "/{uuid}/igroups")
OntapResponse<Igroup> addNestedIgroups(URI uri, @RequestHeader("Authorization") String header, @PathVariable(name = "uuid", required = true) String uuid,
@RequestBody Igroup igroupNestedRequest, @RequestHeader(value="return_records", defaultValue = "true") boolean value);


//Lun Maps Operation APIs

@RequestMapping(method = RequestMethod.POST)
OntapResponse<LunMap> createLunMap(URI baseURL, @RequestHeader("Authorization") String authHeader, @RequestBody LunMap lunMap);
OntapResponse<LunMap> createLunMap(URI baseURL, @RequestHeader("Authorization") String authHeader, @RequestHeader("return_records") boolean value,
@RequestBody LunMap lunMap);

@RequestMapping(method = RequestMethod.GET)
OntapResponse<LunMap> getLunMapResponse(URI baseURL, @RequestHeader("Authorization") String authHeader);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.resource.ResourceManager;
Expand All @@ -38,17 +39,23 @@
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.service.model.AccessGroup;
import org.apache.cloudstack.storage.service.model.ProtocolType;
import org.apache.cloudstack.storage.utils.Constants;
import org.apache.cloudstack.storage.utils.Utility;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import javax.inject.Inject;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
Expand All @@ -58,7 +65,10 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl
@Inject private StorageManager _storageMgr;
@Inject private ResourceManager _resourceMgr;
@Inject private PrimaryDataStoreHelper _dataStoreHelper;
private static final Logger s_logger = (Logger)LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
@Inject private Utility utils;
@Inject private PrimaryDataStoreDao storagePoolDao;
@Inject private StoragePoolDetailsDao storagePoolDetailsDao;
private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);

/**
* Creates primary storage on NetApp storage
Expand Down Expand Up @@ -167,12 +177,35 @@ public DataStore initialize(Map<String, Object> dsInfos) {
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
logger.debug("In attachCluster for ONTAP primary storage");
if (dataStore == null) {
throw new InvalidParameterValueException("attachCluster: dataStore should not be null");
}
if (scope == null) {
throw new InvalidParameterValueException("attachCluster: clusterScope should not be null");
}
List<String> hostsIdentifier = new ArrayList<>();
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
if(storagePool == null) {
s_logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId());
throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId());
}
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);

// TODO- need to check if no host to connect then throw exception or just continue
logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));

Map<String, String> details = primarystore.getDetails();
StorageStrategy strategy = utils.getStrategyByStoragePoolDetails(details);
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
if (!isProtocolSupportedByAllHosts(hostsToConnect, protocol, hostsIdentifier)) {
throw new CloudRuntimeException("Not all hosts in the cluster support the protocol: " + protocol.toString());
}
//TODO - check if no host to connect then also need to create access group without initiators
if (hostsIdentifier != null && hostsIdentifier.size() > 0) {
AccessGroup accessGroupRequest = utils.createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier);
strategy.createAccessGroup(accessGroupRequest);
}
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
Expand All @@ -183,6 +216,25 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
return true;
}

/**
 * Checks that every host exposes a storage identifier valid for the given protocol,
 * collecting each identifier into {@code hostIdentifiers} as a side effect.
 * For ISCSI the identifier must be a non-blank storage URL starting with the IQN prefix.
 *
 * @param hosts           hosts to validate (an empty list is trivially supported)
 * @param protocolType    protocol to validate against; only ISCSI is handled
 * @param hostIdentifiers output list that receives each host's storage URL
 * @return true when every host has a valid identifier, false otherwise
 * @throws CloudRuntimeException for any protocol other than ISCSI
 */
private boolean isProtocolSupportedByAllHosts(List<HostVO> hosts, ProtocolType protocolType, List<String> hostIdentifiers) {
    if (protocolType != ProtocolType.ISCSI) {
        throw new CloudRuntimeException("Unsupported protocol: " + protocolType.toString());
    }
    for (HostVO candidate : hosts) {
        if (candidate == null) {
            return false;
        }
        String storageUrl = candidate.getStorageUrl();
        boolean validIqn = storageUrl != null
                && !storageUrl.trim().isEmpty()
                && storageUrl.startsWith(Constants.IQN);
        if (!validIqn) {
            return false;
        }
        hostIdentifiers.add(storageUrl);
    }
    return true;
}

@Override
// Host-scoped attach is not implemented for this ONTAP lifecycle; always reports failure.
// NOTE(review): presumably only CLUSTER and ZONE scopes are supported (matching attachCluster/attachZone) — confirm.
public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
return false;
Expand All @@ -191,9 +243,32 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
logger.debug("In attachZone for ONTAP primary storage");
if (dataStore == null) {
throw new InvalidParameterValueException("attachZone: dataStore should not be null");
}
if (scope == null) {
throw new InvalidParameterValueException("attachZone: clusterScope should not be null");
}
List<String> hostsIdentifier = new ArrayList<>();
StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
if(storagePool == null) {
s_logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId());
throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId());
}
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);

// TODO- need to check if no host to connect then throw exception or just continue
logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));

Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
StorageStrategy strategy = utils.getStrategyByStoragePoolDetails(details);
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
if (!isProtocolSupportedByAllHosts(hostsToConnect, protocol, hostsIdentifier)) {
throw new CloudRuntimeException("Not all hosts in the zone support the protocol: " + protocol.toString());
}
if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) {
AccessGroup accessGroupRequest = utils.createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier);
strategy.createAccessGroup(accessGroupRequest);
}
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
Expand Down
Loading
Loading