@@ -10,6 +10,7 @@ import (
1010 "github.com/scaleway/scaleway-sdk-go/scw"
1111 "github.com/scaleway/terraform-provider-scaleway/v2/internal/dsf"
1212 "github.com/scaleway/terraform-provider-scaleway/v2/internal/verify"
13+ "strings"
1314)
1415
1516func DataEasyPartitioning () * schema.Resource {
@@ -51,83 +52,6 @@ func DataEasyPartitioning() *schema.Resource {
5152 Optional : true ,
5253 Description : "The partitioning schema in json format" ,
5354 },
54- "disks" : {
55- Type : schema .TypeList ,
56- Computed : true ,
57- Elem : & schema.Resource {
58- Schema : map [string ]* schema.Schema {
59- "device" : {
60- Type : schema .TypeString ,
61- Computed : true ,
62- },
63- "partitions" : {
64- Type : schema .TypeList ,
65- Computed : true ,
66- Elem : & schema.Resource {
67- Schema : map [string ]* schema.Schema {
68- "label" : {
69- Type : schema .TypeString ,
70- Computed : true ,
71- },
72- "number" : {
73- Type : schema .TypeInt ,
74- Computed : true ,
75- },
76- "size" : {
77- Type : schema .TypeString , // scw.Size implements String()
78- Computed : true ,
79- },
80- "use_all_available_space" : {
81- Type : schema .TypeBool ,
82- Computed : true ,
83- },
84- },
85- },
86- },
87- },
88- },
89- },
90- "raids" : {
91- Type : schema .TypeList ,
92- Computed : true ,
93- Elem : & schema.Resource {
94- Schema : map [string ]* schema.Schema {
95- "name" : {
96- Type : schema .TypeString ,
97- Computed : true ,
98- },
99- "level" : {
100- Type : schema .TypeString ,
101- Computed : true ,
102- },
103- "devices" : {
104- Type : schema .TypeList ,
105- Computed : true ,
106- Elem : & schema.Schema {Type : schema .TypeString },
107- },
108- },
109- },
110- },
111- "filesystems" : {
112- Type : schema .TypeList ,
113- Computed : true ,
114- Elem : & schema.Resource {
115- Schema : map [string ]* schema.Schema {
116- "device" : {
117- Type : schema .TypeString ,
118- Computed : true ,
119- },
120- "format" : {
121- Type : schema .TypeString ,
122- Computed : true ,
123- },
124- "mountpoint" : {
125- Type : schema .TypeString ,
126- Computed : true ,
127- },
128- },
129- },
130- },
13155 },
13256 }
13357}
@@ -180,6 +104,7 @@ func addExtraPartition(mountpoint string, newDisksSchema []*baremetal.SchemaDisk
180104 raidDevices = append (raidDevices , device )
181105 deviceIndex --
182106 }
107+ defaultPartitionSchema .Disks = newDisksSchema
183108
184109 filesystem := & baremetal.SchemaFilesystem {
185110 Device : "/dev/md2" ,
@@ -188,13 +113,12 @@ func addExtraPartition(mountpoint string, newDisksSchema []*baremetal.SchemaDisk
188113 }
189114 defaultPartitionSchema .Filesystems = append (defaultPartitionSchema .Filesystems , filesystem )
190115
191- raid := & baremetal.SchemaRAID {
192- Name : "/dev/md2" ,
193- Level : baremetal .SchemaRAIDLevelRaidLevel1 ,
194- Devices : raidDevices ,
195- }
196- defaultPartitionSchema .Raids = append (defaultPartitionSchema .Raids , raid )
197- defaultPartitionSchema .Disks = newDisksSchema
	// NOTE(review): dead commented-out RAID construction removed instead of
	// being left in the tree. With no SchemaRAID entry for /dev/md2, the
	// filesystem appended above targets a device that is never assembled, and
	// the raidDevices accumulation earlier in this function is dead code —
	// confirm dropping the RAID is intentional; otherwise restore the entry
	// rather than un-commenting stale code.
198122
199123 return defaultPartitionSchema
200124}
@@ -215,64 +139,6 @@ func manageRootSize(originalDisks []*baremetal.SchemaDisk, withSwap bool, withEx
215139 }
216140}
217141
218- func flattenDisksSchema (disks []* baremetal.SchemaDisk ) []map [string ]interface {} {
219- var out []map [string ]interface {}
220- for _ , d := range disks {
221- if d == nil {
222- continue
223- }
224-
225- parts := make ([]map [string ]interface {}, 0 , len (d .Partitions ))
226- for _ , p := range d .Partitions {
227- if p == nil {
228- continue
229- }
230- parts = append (parts , map [string ]interface {}{
231- "label" : string (p .Label ),
232- "number" : int (p .Number ),
233- "size" : p .Size .String (),
234- "use_all_available_space" : p .UseAllAvailableSpace ,
235- })
236- }
237-
238- out = append (out , map [string ]interface {}{
239- "device" : d .Device ,
240- "partitions" : parts ,
241- })
242- }
243- return out
244- }
245-
246- func flattenRaids (raids []* baremetal.SchemaRAID ) []map [string ]interface {} {
247- var out []map [string ]interface {}
248- for _ , r := range raids {
249- if r == nil {
250- continue
251- }
252- out = append (out , map [string ]interface {}{
253- "name" : r .Name ,
254- "level" : string (r .Level ),
255- "devices" : r .Devices ,
256- })
257- }
258- return out
259- }
260-
261- func flattenFilesystems (fsList []* baremetal.SchemaFilesystem ) []map [string ]interface {} {
262- var out []map [string ]interface {}
263- for _ , fs := range fsList {
264- if fs == nil {
265- continue
266- }
267- out = append (out , map [string ]interface {}{
268- "device" : fs .Device ,
269- "format" : string (fs .Format ),
270- "mountpoint" : fs .Mountpoint ,
271- })
272- }
273- return out
274- }
275-
276142func dataEasyPartitioningRead (ctx context.Context , d * schema.ResourceData , m interface {}) diag.Diagnostics {
277143 api , fallBackZone , err := newAPIWithZone (d , m )
278144 if err != nil {
@@ -324,9 +190,9 @@ func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m int
324190 jsonSchema , _ := json .Marshal (defaultPartitioningSchema )
325191 d .SetId (fmt .Sprintf ("%s-%s" , offerID , osID ))
326192 _ = d .Set ("json_partition" , string (jsonSchema ))
327- _ = d .Set ("disks" , flattenDisksSchema (defaultPartitioningSchema .Disks ))
328- _ = d .Set ("raids" , flattenRaids (defaultPartitioningSchema .Raids ))
329- _ = d .Set ("filesystems" , flattenFilesystems (defaultPartitioningSchema .Filesystems ))
193+ // _ = d.Set("disks", flattenDisksSchema(defaultPartitioningSchema.Disks))
194+ // _ = d.Set("raids", flattenRaids(defaultPartitioningSchema.Raids))
195+ // _ = d.Set("filesystems", flattenFilesystems(defaultPartitioningSchema.Filesystems))
330196
331197 return nil
332198 }
@@ -336,16 +202,16 @@ func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m int
336202 var newDiskSchema []* baremetal.SchemaDisk
337203 if ! swap {
338204 newDiskSchema = removeSwap (defaultPartitioningSchema .Disks , extraPart )
339- }
340-
341- if newDiskSchema == nil {
205+ } else {
342206 newDiskSchema = defaultPartitioningSchema .Disks
343207 }
344208
345209 var newCustomPartition * baremetal.Schema
346210 if extraPart {
347211 mountpoint := d .Get ("ext_4_mountpoint" ).(string )
348212 newCustomPartition = addExtraPartition (mountpoint , newDiskSchema , defaultPartitioningSchema )
213+ } else {
214+ newCustomPartition = defaultPartitioningSchema
349215 }
350216
351217 err = api .ValidatePartitioningSchema (& baremetal.ValidatePartitioningSchemaRequest {
@@ -365,10 +231,12 @@ func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m int
365231 }
366232
367233 d .SetId (fmt .Sprintf ("%s-%s" , offerID , osID ))
368- _ = d .Set ("json_partition" , string (jsonSchema ))
369- _ = d .Set ("disks" , flattenDisksSchema (newCustomPartition .Disks ))
370- _ = d .Set ("raids" , flattenRaids (newCustomPartition .Raids ))
371- _ = d .Set ("filesystems" , flattenFilesystems (newCustomPartition .Filesystems ))
234+ jsonSchemaStr := string (jsonSchema )
235+ strings .ReplaceAll (jsonSchemaStr , "\" " , "\\ \" " )
236+ _ = d .Set ("json_partition" , jsonSchemaStr )
237+ //_ = d.Set("disks", flattenDisksSchema(newCustomPartition.Disks))
238+ //_ = d.Set("raids", flattenRaids(newCustomPartition.Raids))
239+ //_ = d.Set("filesystems", flattenFilesystems(newCustomPartition.Filesystems))
372240
373241 return nil
374242}
0 commit comments