@@ -3,14 +3,16 @@ package baremetal
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
+
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/scaleway/scaleway-sdk-go/api/baremetal/v1"
 	"github.com/scaleway/scaleway-sdk-go/scw"
 	"github.com/scaleway/terraform-provider-scaleway/v2/internal/dsf"
+	"github.com/scaleway/terraform-provider-scaleway/v2/internal/locality/zonal"
 	"github.com/scaleway/terraform-provider-scaleway/v2/internal/verify"
-	"strings"
 )
 
 func DataEasyPartitioning() *schema.Resource {
@@ -41,7 +43,7 @@ func DataEasyPartitioning() *schema.Resource {
 			Default:     true,
 			Description: "set extra ext_4 partition",
 		},
-		"ext_4_mountpoint": { //TODO change to mount point
+		"ext_4_mountpoint": { // TODO change to mount point
 			Type:     schema.TypeString,
 			Optional: true,
 			Default:  "/hello",
@@ -56,129 +58,45 @@ func DataEasyPartitioning() *schema.Resource {
 	}
 }
 
-func removeSwap(originalDisks []*baremetal.SchemaDisk, withExtraPartition bool) []*baremetal.SchemaDisk {
-	var result []*baremetal.SchemaDisk
-
-	for _, disk := range originalDisks {
-		i := 1
-		newPartitions := []*baremetal.SchemaPartition{}
-		for _, p := range disk.Partitions {
-			if p.Label == "swap" {
-				continue
-			}
-			if p.Label == "root" {
-				if !withExtraPartition {
-					p.Size = 0
-					p.UseAllAvailableSpace = true
-				} else {
-					p.Size = 20000000000
-				}
-			}
-			p.Number = uint32(i)
-			i++
-			newPartitions = append(newPartitions, p)
-		}
-		result = append(result, &baremetal.SchemaDisk{
-			Device:     disk.Device,
-			Partitions: newPartitions,
-		})
-	}
-	return result
-}
-
-func addExtraPartition(mountpoint string, newDisksSchema []*baremetal.SchemaDisk, defaultPartitionSchema *baremetal.Schema) *baremetal.Schema {
-	raidDevices := []string{}
-
-	for _, disk := range newDisksSchema {
-		partIndex := uint32(len(disk.Partitions)) + 1
-		deviceIndex := partIndex + 1
-		data := &baremetal.SchemaPartition{
-			Label:                baremetal.SchemaPartitionLabel("data"),
-			Number:               partIndex,
-			Size:                 0,
-			UseAllAvailableSpace: true,
-		}
-		disk.Partitions = append(disk.Partitions, data)
-
-		device := fmt.Sprintf("%sp%d", disk.Device, deviceIndex)
-		raidDevices = append(raidDevices, device)
-		deviceIndex--
-	}
-	defaultPartitionSchema.Disks = newDisksSchema
-
-	filesystem := &baremetal.SchemaFilesystem{
-		Device:     "/dev/md2",
-		Format:     "ext4",
-		Mountpoint: mountpoint,
-	}
-	defaultPartitionSchema.Filesystems = append(defaultPartitionSchema.Filesystems, filesystem)
-
-	//raid := &baremetal.SchemaRAID{
-	//	Name:    "/dev/md2",
-	//	Level:   baremetal.SchemaRAIDLevelRaidLevel1,
-	//	Devices: raidDevices,
-	//}
-	//defaultPartitionSchema.Raids = append(defaultPartitionSchema.Raids, raid)
-
-	return defaultPartitionSchema
-}
-
-func manageRootSize(originalDisks []*baremetal.SchemaDisk, withSwap bool, withExtraPartition bool) {
-	for _, disk := range originalDisks {
-		for _, partition := range disk.Partitions {
-			if partition.Label == "root" {
-				if !withSwap && !withExtraPartition {
-					partition.Size = 0
-					partition.UseAllAvailableSpace = true
-				}
-				if withExtraPartition {
-					partition.Size = 20000000000
-				}
-			}
-		}
-	}
-}
-
 func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	api, fallBackZone, err := newAPIWithZone(d, m)
 	if err != nil {
 		return diag.FromErr(err)
 	}
 
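+	// os_id and offer_id may be zone-prefixed ("<zone>/<uuid>"); zonal.ExpandID
+	// splits the locality from the raw UUID that the API expects.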
-	osID := d.Get("os_id").(string)
+	osID := zonal.ExpandID(d.Get("os_id").(string))
 
 	os, err := api.GetOS(&baremetal.GetOSRequest{
 		Zone: fallBackZone,
-		OsID: osID,
+		OsID: osID.ID,
 	}, scw.WithContext(ctx))
 	if err != nil {
 		return diag.FromErr(err)
 	}
 
 	if !os.CustomPartitioningSupported {
-		return diag.FromErr(fmt.Errorf("custom partitioning is not supported with this OS"))
+		return diag.FromErr(errors.New("custom partitioning is not supported with this OS"))
 	}
 
-	offerID := d.Get("offer_id").(string)
+	offerID := zonal.ExpandID(d.Get("offer_id").(string))
 
 	offer, err := api.GetOffer(&baremetal.GetOfferRequest{
 		Zone:    fallBackZone,
-		OfferID: offerID,
+		OfferID: offerID.ID,
 	}, scw.WithContext(ctx))
 	if err != nil {
 		return diag.FromErr(err)
 	}
 
 	if !isOSCompatible(offer, os) {
-		return diag.FromErr(fmt.Errorf("os and offer are not compatible"))
+		return diag.FromErr(errors.New("OS and offer are not compatible"))
 	}
 
 	defaultPartitioningSchema, err := api.GetDefaultPartitioningSchema(&baremetal.GetDefaultPartitioningSchemaRequest{
 		Zone:    fallBackZone,
-		OfferID: offerID,
-		OsID:    osID,
+		OfferID: offerID.ID,
+		OsID:    osID.ID,
 	}, scw.WithContext(ctx))
-
 	if err != nil {
 		return diag.FromErr(err)
 	}
@@ -187,12 +105,13 @@ func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m int
 	swap := d.Get("swap").(bool)
 
 	if swap && !extraPart {
-		jsonSchema, _ := json.Marshal(defaultPartitioningSchema)
+		jsonSchema, err := json.Marshal(defaultPartitioningSchema)
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
 		d.SetId(fmt.Sprintf("%s-%s", offerID, osID))
 		_ = d.Set("json_partition", string(jsonSchema))
-		//_ = d.Set("disks", flattenDisksSchema(defaultPartitioningSchema.Disks))
-		//_ = d.Set("raids", flattenRaids(defaultPartitioningSchema.Raids))
-		//_ = d.Set("filesystems", flattenFilesystems(defaultPartitioningSchema.Filesystems))
 
 		return nil
 	}
@@ -207,6 +126,7 @@ func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m int
 	}
 
 	var newCustomPartition *baremetal.Schema
+
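+	// When extra_ext_4 is set, append a data partition to the reworked disks and
+	// mount an ext4 filesystem at the user-supplied path.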
 	if extraPart {
 		mountpoint := d.Get("ext_4_mountpoint").(string)
 		newCustomPartition = addExtraPartition(mountpoint, newDiskSchema, defaultPartitioningSchema)
@@ -216,11 +136,10 @@ func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m int
 
 	err = api.ValidatePartitioningSchema(&baremetal.ValidatePartitioningSchemaRequest{
 		Zone:               fallBackZone,
-		OfferID:            offerID,
-		OsID:               osID,
+		OfferID:            offerID.ID,
+		OsID:               osID.ID,
 		PartitioningSchema: defaultPartitioningSchema,
 	})
-
 	if err != nil {
 		return diag.FromErr(err)
 	}
@@ -231,12 +150,91 @@ func dataEasyPartitioningRead(ctx context.Context, d *schema.ResourceData, m int
 	}
 
 	d.SetId(fmt.Sprintf("%s-%s", offerID, osID))
+
 	jsonSchemaStr := string(jsonSchema)
-	strings.ReplaceAll(jsonSchemaStr, "\"", "\\\"")
+
 	_ = d.Set("json_partition", jsonSchemaStr)
-	//_ = d.Set("disks", flattenDisksSchema(newCustomPartition.Disks))
-	//_ = d.Set("raids", flattenRaids(newCustomPartition.Raids))
-	//_ = d.Set("filesystems", flattenFilesystems(newCustomPartition.Filesystems))
 
 	return nil
 }
+
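+// removeSwap drops the swap partition from each disk and renumbers the remaining
+// partitions. The root partition is resized as a side effect: it takes all
+// remaining space, unless an extra partition is requested, in which case it is
+// capped at 20 GB (20000000000 bytes).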
+func removeSwap(originalDisks []*baremetal.SchemaDisk, withExtraPartition bool) []*baremetal.SchemaDisk {
+	lenOfDisks := len(originalDisks)
+	if !withExtraPartition {
+		lenOfDisks = len(originalDisks) - 1
+	}
+
+	result := make([]*baremetal.SchemaDisk, 0, lenOfDisks)
+
+	for _, disk := range originalDisks {
+		i := 1
+		newPartitions := []*baremetal.SchemaPartition{}
+
+		for _, p := range disk.Partitions {
+			if p.Label == "swap" {
+				continue
+			}
+
+			if p.Label == "root" {
+				if !withExtraPartition {
+					p.Size = 0
+					p.UseAllAvailableSpace = true
+				} else {
+					p.Size = 20000000000
+				}
+			}
+
+			p.Number = uint32(i)
+			i++
+
+			newPartitions = append(newPartitions, p)
+		}
+
+		result = append(result, &baremetal.SchemaDisk{
+			Device:     disk.Device,
+			Partitions: newPartitions,
+		})
+	}
+
+	return result
+}
+
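+// addExtraPartition appends a "data" partition consuming the remaining space on
+// each disk, then registers an ext4 filesystem on /dev/md2 mounted at mountpoint
+// in the resulting schema.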
+func addExtraPartition(mountpoint string, newDisksSchema []*baremetal.SchemaDisk, defaultPartitionSchema *baremetal.Schema) *baremetal.Schema {
+	for _, disk := range newDisksSchema {
+		partIndex := uint32(len(disk.Partitions)) + 1
+		data := &baremetal.SchemaPartition{
+			Label:                baremetal.SchemaPartitionLabel("data"),
+			Number:               partIndex,
+			Size:                 0,
+			UseAllAvailableSpace: true,
+		}
+		disk.Partitions = append(disk.Partitions, data)
+	}
+
+	defaultPartitionSchema.Disks = newDisksSchema
+	filesystem := &baremetal.SchemaFilesystem{
+		Device:     "/dev/md2",
+		Format:     "ext4",
+		Mountpoint: mountpoint,
+	}
+	defaultPartitionSchema.Filesystems = append(defaultPartitionSchema.Filesystems, filesystem)
+
+	return defaultPartitionSchema
+}
+
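+// manageRootSize resizes the root partition in place: it grows to all available
+// space when neither swap nor an extra partition is kept, and is capped at 20 GB
+// when an extra partition is requested.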
+func manageRootSize(originalDisks []*baremetal.SchemaDisk, withSwap bool, withExtraPartition bool) {
+	for _, disk := range originalDisks {
+		for _, partition := range disk.Partitions {
+			if partition.Label == "root" {
+				if !withSwap && !withExtraPartition {
+					partition.Size = 0
+					partition.UseAllAvailableSpace = true
+				}
+
+				if withExtraPartition {
+					partition.Size = 20000000000
+				}
+			}
+		}
+	}
+}