Skip to content

Commit fba0d1f

Browse files
authored
More places to consider compute requirements on volume creation (#2840)
* More places to consider compute requirements on volume creation
* Consider compute requirements for migrated volumes
1 parent 001cdfd commit fba0d1f

File tree

5 files changed

+51
-40
lines changed

5 files changed

+51
-40
lines changed

internal/command/deploy/deploy_first.go

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -120,11 +120,12 @@ func (md *machineDeployment) provisionVolumesOnFirstDeploy(ctx context.Context)
120120
fmt.Fprintf(md.io.Out, "Creating 1GB volume '%s' for process group '%s'. Use 'fly vol extend' to increase its size\n", m.Source, groupName)
121121

122122
input := api.CreateVolumeRequest{
123-
Name: m.Source,
124-
Region: groupConfig.PrimaryRegion,
125-
SizeGb: api.Pointer(1),
126-
Encrypted: api.Pointer(true),
127-
HostDedicationId: md.appConfig.HostDedicationID,
123+
Name: m.Source,
124+
Region: groupConfig.PrimaryRegion,
125+
SizeGb: api.Pointer(1),
126+
Encrypted: api.Pointer(true),
127+
HostDedicationId: md.appConfig.HostDedicationID,
128+
ComputeRequirements: md.machineGuest,
128129
}
129130

130131
vol, err := md.flapsClient.CreateVolume(ctx, input)

internal/command/launch/launch_frameworks.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -136,11 +136,12 @@ func (state *launchState) scannerCreateVolumes(ctx context.Context) error {
136136

137137
for _, vol := range state.sourceInfo.Volumes {
138138
volume, err := flapsClient.CreateVolume(ctx, api.CreateVolumeRequest{
139-
Name: vol.Source,
140-
Region: state.Plan.RegionCode,
141-
SizeGb: api.Pointer(1),
142-
Encrypted: api.Pointer(true),
143-
HostDedicationId: state.appConfig.HostDedicationID,
139+
Name: vol.Source,
140+
Region: state.Plan.RegionCode,
141+
SizeGb: api.Pointer(1),
142+
Encrypted: api.Pointer(true),
143+
HostDedicationId: state.appConfig.HostDedicationID,
144+
ComputeRequirements: state.Plan.Guest(),
144145
})
145146
if err != nil {
146147
return err
@@ -219,7 +220,6 @@ func execInitCommand(ctx context.Context, command scanner.InitCommand) (err erro
219220
}
220221

221222
func (state *launchState) scannerSetAppconfig(ctx context.Context) error {
222-
223223
srcInfo := state.sourceInfo
224224
appConfig := state.appConfig
225225

internal/command/machine/clone.go

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -245,13 +245,14 @@ func runMachineClone(ctx context.Context) (err error) {
245245
}
246246

247247
volInput := api.CreateVolumeRequest{
248-
Name: mnt.Name,
249-
Region: region,
250-
SizeGb: &mnt.SizeGb,
251-
Encrypted: &mnt.Encrypted,
252-
SnapshotID: snapshotID,
253-
RequireUniqueZone: api.Pointer(flag.GetBool(ctx, "volume-requires-unique-zone")),
254-
HostDedicationId: source.HostDedicationID,
248+
Name: mnt.Name,
249+
Region: region,
250+
SizeGb: &mnt.SizeGb,
251+
Encrypted: &mnt.Encrypted,
252+
SnapshotID: snapshotID,
253+
RequireUniqueZone: api.Pointer(flag.GetBool(ctx, "volume-requires-unique-zone")),
254+
HostDedicationId: source.HostDedicationID,
255+
ComputeRequirements: targetConfig.Guest,
255256
}
256257
vol, err = flapsClient.CreateVolume(ctx, volInput)
257258
if err != nil {

internal/command/migrate_to_v2/volumes.go

Lines changed: 23 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -60,37 +60,45 @@ func (m *v2PlatformMigrator) migrateAppVolumes(ctx context.Context) error {
6060
return v
6161
}))
6262

63-
for _, vol := range m.oldAttachedVolumes {
64-
newVol, err := m.flapsClient.CreateVolume(ctx, api.CreateVolumeRequest{
65-
SourceVolumeID: &vol.ID,
66-
MachinesOnly: api.Pointer(true),
67-
Name: vol.Name,
68-
})
69-
if err != nil && strings.HasSuffix(err.Error(), " is not a valid candidate") {
70-
return fmt.Errorf("unfortunately the worker hosting your volume %s (%s) does not have capacity for another volume to support the migration; some other options: 1) try again later and there might be more space on the worker, 2) run a manual migration https://community.fly.io/t/manual-migration-to-apps-v2/11870, or 3) wait until we support volume migrations across workers (we're working on it!)", vol.ID, vol.Name)
71-
} else if err != nil {
72-
return err
73-
}
63+
allocMap := lo.KeyBy(m.oldAllocs, func(a *api.AllocationStatus) string {
64+
return a.IDShort
65+
})
7466

67+
for _, vol := range m.oldAttachedVolumes {
7568
// We have to search for the full alloc ID, because the volume only has the short-form alloc ID
76-
allocId := ""
7769
path := ""
70+
allocId := ""
71+
processGroup := ""
72+
7873
if shortAllocId := vol.AttachedAllocation; shortAllocId != nil {
79-
alloc, ok := lo.Find(m.oldAllocs, func(a *api.AllocationStatus) bool {
80-
return a.IDShort == *shortAllocId
81-
})
74+
alloc, ok := allocMap[*shortAllocId]
8275
if !ok {
8376
return fmt.Errorf("volume %s[%s] is attached to alloc %s, but that alloc is not running", vol.Name, vol.ID, *shortAllocId)
8477
}
8578
allocId = alloc.ID
79+
processGroup = alloc.TaskName
80+
8681
path = m.nomadVolPath(&vol, alloc.TaskName)
8782
if path == "" {
8883
return fmt.Errorf("volume %s[%s] is mounted on alloc %s, but has no mountpoint", vol.Name, vol.ID, allocId)
8984
}
9085
}
86+
87+
newVol, err := m.flapsClient.CreateVolume(ctx, api.CreateVolumeRequest{
88+
SourceVolumeID: &vol.ID,
89+
MachinesOnly: api.Pointer(true),
90+
Name: vol.Name,
91+
ComputeRequirements: m.machineGuests[processGroup],
92+
})
93+
if err != nil && strings.HasSuffix(err.Error(), " is not a valid candidate") {
94+
return fmt.Errorf("unfortunately the worker hosting your volume %s (%s) does not have capacity for another volume to support the migration; some other options: 1) try again later and there might be more space on the worker, 2) run a manual migration https://community.fly.io/t/manual-migration-to-apps-v2/11870, or 3) wait until we support volume migrations across workers (we're working on it!)", vol.ID, vol.Name)
95+
} else if err != nil {
96+
return err
97+
}
9198
if m.verbose {
9299
fmt.Fprintf(m.io.Out, "Forked volume %s[%s] into %s[%s]\n", vol.Name, vol.ID, newVol.Name, newVol.ID)
93100
}
101+
94102
m.createdVolumes = append(m.createdVolumes, &NewVolume{
95103
vol: newVol,
96104
previousAllocId: allocId,

internal/command/scale/machine_defaults.go

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -121,12 +121,13 @@ func (d *defaultValues) CreateVolumeRequest(mConfig *api.MachineConfig, region s
121121
}
122122
mount := mConfig.Mounts[0]
123123
return &api.CreateVolumeRequest{
124-
Name: mount.Name,
125-
Region: region,
126-
SizeGb: &mount.SizeGb,
127-
Encrypted: api.Pointer(mount.Encrypted),
128-
RequireUniqueZone: api.Pointer(false),
129-
SnapshotID: d.snapshotID,
130-
HostDedicationId: mConfig.HostDedicationId,
124+
Name: mount.Name,
125+
Region: region,
126+
SizeGb: &mount.SizeGb,
127+
Encrypted: api.Pointer(mount.Encrypted),
128+
RequireUniqueZone: api.Pointer(false),
129+
SnapshotID: d.snapshotID,
130+
HostDedicationId: mConfig.HostDedicationId,
131+
ComputeRequirements: mConfig.Guest,
131132
}
132133
}

0 commit comments

Comments (0)