2121
2222
2323import com .cloud .agent .api .StoragePoolInfo ;
24+ import com .cloud .dc .ClusterVO ;
25+ import com .cloud .dc .dao .ClusterDao ;
26+ import com .cloud .host .HostVO ;
2427import com .cloud .hypervisor .Hypervisor ;
28+ import com .cloud .resource .ResourceManager ;
29+ import com .cloud .storage .Storage ;
30+ import com .cloud .storage .StorageManager ;
2531import com .cloud .storage .StoragePool ;
32+ import com .cloud .utils .exception .CloudRuntimeException ;
33+ import com .google .common .base .Preconditions ;
2634import org .apache .cloudstack .engine .subsystem .api .storage .ClusterScope ;
2735import org .apache .cloudstack .engine .subsystem .api .storage .DataStore ;
2836import org .apache .cloudstack .engine .subsystem .api .storage .HostScope ;
37+ import org .apache .cloudstack .engine .subsystem .api .storage .PrimaryDataStoreInfo ;
2938import org .apache .cloudstack .engine .subsystem .api .storage .PrimaryDataStoreLifeCycle ;
39+ import org .apache .cloudstack .engine .subsystem .api .storage .PrimaryDataStoreParameters ;
3040import org .apache .cloudstack .engine .subsystem .api .storage .ZoneScope ;
41+ import org .apache .cloudstack .storage .datastore .lifecycle .BasePrimaryDataStoreLifeCycleImpl ;
42+ import org .apache .cloudstack .storage .feign .model .OntapStorage ;
43+ import org .apache .cloudstack .storage .provider .StorageProviderFactory ;
44+ import org .apache .cloudstack .storage .service .StorageStrategy ;
45+ import org .apache .cloudstack .storage .utils .Constants ;
46+ import org .apache .cloudstack .storage .volume .datastore .PrimaryDataStoreHelper ;
3147import org .apache .logging .log4j .LogManager ;
3248import org .apache .logging .log4j .Logger ;
33- import java .util .Map ;
3449
35- public class OntapPrimaryDatastoreLifecycle implements PrimaryDataStoreLifeCycle {
50+ import javax .inject .Inject ;
51+ import java .util .List ;
52+ import java .util .Map ;
53+ import java .util .UUID ;
3654
public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
    // Looks up the target cluster (and its hypervisor type) during initialize().
    @Inject private ClusterDao _clusterDao;
    // Establishes the host<->pool connection when the store is attached to a cluster or zone.
    @Inject private StorageManager _storageMgr;
    // Resolves the eligible up-and-enabled hosts for storage connection.
    @Inject private ResourceManager _resourceMgr;
    // Persists the primary data store record and transitions its attach state.
    @Inject private PrimaryDataStoreHelper _dataStoreHelper;
    private static final Logger s_logger = (Logger)LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
3861
3962 /**
@@ -43,14 +66,125 @@ public class OntapPrimaryDatastoreLifecycle implements PrimaryDataStoreLifeCycle
4366 */
4467 @ Override
4568 public DataStore initialize (Map <String , Object > dsInfos ) {
69+ if (dsInfos == null ) {
70+ throw new CloudRuntimeException ("Datastore info map is null, cannot create primary storage" );
71+ }
72+ String url = dsInfos .get ("url" ).toString (); // TODO: Decide on whether should the customer enter just the Management LIF IP or https://ManagementLIF
73+ Long zoneId = (Long ) dsInfos .get ("zoneId" );
74+ Long podId = (Long )dsInfos .get ("podId" );
75+ Long clusterId = (Long )dsInfos .get ("clusterId" );
76+ String storagePoolName = dsInfos .get ("name" ).toString ();
77+ String providerName = dsInfos .get ("providerName" ).toString ();
78+ String tags = dsInfos .get ("tags" ).toString ();
79+ Boolean isTagARule = (Boolean ) dsInfos .get ("isTagARule" );
80+ String scheme = dsInfos .get ("scheme" ).toString ();
81+
82+ s_logger .info ("Creating ONTAP primary storage pool with name: " + storagePoolName + ", provider: " + providerName +
83+ ", zoneId: " + zoneId + ", podId: " + podId + ", clusterId: " + clusterId + ", protocol: " + scheme );
84+
85+ // Additional details requested for ONTAP primary storage pool creation
86+ @ SuppressWarnings ("unchecked" )
87+ Map <String , String > details = (Map <String , String >)dsInfos .get ("details" );
88+ // Validations
89+ if (podId != null && clusterId == null ) {
90+ s_logger .error ("Cluster Id is null, cannot create primary storage" );
91+ return null ;
92+ } else if (podId == null && clusterId != null ) {
93+ s_logger .error ("Pod Id is null, cannot create primary storage" );
94+ return null ;
95+ }
96+
97+ if (podId == null && clusterId == null ) {
98+ if (zoneId != null ) {
99+ s_logger .info ("Both Pod Id and Cluster Id are null, Primary storage pool will be associated with a Zone" );
100+ } else {
101+ throw new CloudRuntimeException ("Pod Id, Cluster Id and Zone Id are all null, cannot create primary storage" );
102+ }
103+ }
104+
105+ if (storagePoolName == null || storagePoolName .isEmpty ()) {
106+ throw new CloudRuntimeException ("Storage pool name is null or empty, cannot create primary storage" );
107+ }
108+
109+ if (providerName == null || providerName .isEmpty ()) {
110+ throw new CloudRuntimeException ("Provider name is null or empty, cannot create primary storage" );
111+ }
112+
113+ PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters ();
114+ if (clusterId != null ) {
115+ ClusterVO clusterVO = _clusterDao .findById (clusterId );
116+ Preconditions .checkNotNull (clusterVO , "Unable to locate the specified cluster" );
117+ if (clusterVO .getHypervisorType () != Hypervisor .HypervisorType .KVM ) {
118+ throw new CloudRuntimeException ("ONTAP primary storage is not supported for KVM hypervisor" );
119+ }
120+ parameters .setHypervisorType (clusterVO .getHypervisorType ());
121+ }
122+
123+ // TODO: While testing need to check what does this actually do and if the fields corresponding to each protocol should also be set
124+ // TODO: scheme could be 'custom' in our case and we might have to ask 'protocol' separately to the user
125+ String protocol = details .get (Constants .PROTOCOL );
126+ switch (protocol .toLowerCase ()) {
127+ case Constants .NFS :
128+ parameters .setType (Storage .StoragePoolType .NetworkFilesystem );
129+ break ;
130+ case Constants .ISCSI :
131+ parameters .setType (Storage .StoragePoolType .Iscsi );
132+ break ;
133+ default :
134+ throw new CloudRuntimeException ("Unsupported protocol: " + scheme + ", cannot create primary storage" );
135+ }
46136
47- return null ;
137+ details . put ( Constants . MANAGEMENTLIF , url ) ;
48138
139+ // Validate the ONTAP details
140+ if (details .get (Constants .ISDISAGGREGATED ) == null || details .get (Constants .ISDISAGGREGATED ).isEmpty ()) {
141+ details .put (Constants .ISDISAGGREGATED , "false" );
142+ }
143+
144+ OntapStorage ontapStorage = new OntapStorage (details .get (Constants .USERNAME ), details .get (Constants .PASSWORD ),
145+ details .get (Constants .MANAGEMENTLIF ), details .get (Constants .SVMNAME ), details .get (Constants .PROTOCOL ),
146+ Boolean .parseBoolean (details .get (Constants .ISDISAGGREGATED )));
147+ StorageProviderFactory storageProviderManager = new StorageProviderFactory (ontapStorage );
148+ StorageStrategy storageStrategy = storageProviderManager .getStrategy ();
149+ boolean isValid = storageStrategy .connect ();
150+ if (isValid ) {
151+ // String volumeName = storagePoolName + "_vol"; //TODO: Figure out a better naming convention
152+ storageStrategy .createVolume (storagePoolName , Long .parseLong ((details .get ("size" )))); // TODO: size should be in bytes, so see if conversion is needed
153+ } else {
154+ throw new CloudRuntimeException ("ONTAP details validation failed, cannot create primary storage" );
155+ }
156+
157+ parameters .setTags (tags );
158+ parameters .setIsTagARule (isTagARule );
159+ parameters .setDetails (details );
160+ parameters .setUuid (UUID .randomUUID ().toString ());
161+ parameters .setZoneId (zoneId );
162+ parameters .setPodId (podId );
163+ parameters .setClusterId (clusterId );
164+ parameters .setName (storagePoolName );
165+ parameters .setProviderName (providerName );
166+ parameters .setManaged (true );
167+
168+ return _dataStoreHelper .createPrimaryDataStore (parameters );
49169 }
50170
51171 @ Override
52- public boolean attachCluster (DataStore store , ClusterScope scope ) {
53- return false ;
172+ public boolean attachCluster (DataStore dataStore , ClusterScope scope ) {
173+ logger .debug ("In attachCluster for ONTAP primary storage" );
174+ PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo )dataStore ;
175+ List <HostVO > hostsToConnect = _resourceMgr .getEligibleUpAndEnabledHostsInClusterForStorageConnection (primarystore );
176+
177+ logger .debug (String .format ("Attaching the pool to each of the hosts %s in the cluster: %s" , hostsToConnect , primarystore .getClusterId ()));
178+ for (HostVO host : hostsToConnect ) {
179+ // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
180+ try {
181+ _storageMgr .connectHostToSharedPool (host , dataStore .getId ());
182+ } catch (Exception e ) {
183+ logger .warn ("Unable to establish a connection between " + host + " and " + dataStore , e );
184+ }
185+ }
186+ _dataStoreHelper .attachCluster (dataStore );
187+ return true ;
54188 }
55189
56190 @ Override
@@ -60,7 +194,20 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis
60194
61195 @ Override
62196 public boolean attachZone (DataStore dataStore , ZoneScope scope , Hypervisor .HypervisorType hypervisorType ) {
63- return false ;
197+ logger .debug ("In attachZone for ONTAP primary storage" );
198+ List <HostVO > hostsToConnect = _resourceMgr .getEligibleUpAndEnabledHostsInZoneForStorageConnection (dataStore , scope .getScopeId (), Hypervisor .HypervisorType .KVM );
199+
200+ logger .debug (String .format ("In createPool. Attaching the pool to each of the hosts in %s." , hostsToConnect ));
201+ for (HostVO host : hostsToConnect ) {
202+ // TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
203+ try {
204+ _storageMgr .connectHostToSharedPool (host , dataStore .getId ());
205+ } catch (Exception e ) {
206+ logger .warn ("Unable to establish a connection between " + host + " and " + dataStore , e );
207+ }
208+ }
209+ _dataStoreHelper .attachZone (dataStore );
210+ return true ;
64211 }
65212
66213 @ Override
0 commit comments