Skip to content

Commit c78f1cb

Browse files
committed
Cleanup and integrate PR comments
Signed-off-by: Amory Hoste <[email protected]>
1 parent 27e693c commit c78f1cb

File tree

19 files changed

+656
-1496
lines changed

19 files changed

+656
-1496
lines changed

cri/firecracker/coordinator.go

Lines changed: 29 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -41,14 +41,12 @@ import (
4141

4242
const snapshotsDir = "/fccd/snapshots"
4343

44-
// TODO: interface for orchestrator
45-
4644
type coordinator struct {
4745
sync.Mutex
4846
orch *ctriface.Orchestrator
4947
nextID uint64
5048
isSparseSnaps bool
51-
isDeduplicatedSnaps bool
49+
isFullLocal bool
5250

5351
activeInstances map[string]*FuncInstance
5452
snapshotManager *snapshotting.SnapshotManager
@@ -64,15 +62,15 @@ func withoutOrchestrator() coordinatorOption {
6462
}
6563
}
6664

67-
func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool, isDeduplicatedSnaps bool, opts ...coordinatorOption) *coordinator {
65+
func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps bool, isFullLocal bool, opts ...coordinatorOption) *coordinator {
6866
c := &coordinator{
6967
activeInstances: make(map[string]*FuncInstance),
7068
orch: orch,
7169
isSparseSnaps: isSparseSnaps,
72-
isDeduplicatedSnaps: isDeduplicatedSnaps,
70+
isFullLocal: isFullLocal,
7371
}
7472

75-
if isDeduplicatedSnaps {
73+
if isFullLocal {
7674
c.snapshotManager = snapshotting.NewSnapshotManager(deduplicated.NewSnapshotManager(snapshotsDir, snapsCapacityMiB))
7775
} else {
7876
c.snapshotManager = snapshotting.NewSnapshotManager(regular.NewRegularSnapshotManager(snapshotsDir))
@@ -88,26 +86,25 @@ func newFirecrackerCoordinator(orch *ctriface.Orchestrator, snapsCapacityMiB int
8886
func (c *coordinator) startVM(ctx context.Context, image string, revision string, memSizeMib, vCPUCount uint32) (*FuncInstance, error) {
8987
if c.orch != nil && c.orch.GetSnapshotsEnabled() {
9088
id := image
91-
if c.isDeduplicatedSnaps {
89+
if c.isFullLocal {
9290
id = revision
9391
}
9492

9593
// Check if snapshot is available
9694
if snap, err := c.snapshotManager.AcquireSnapshot(id); err == nil {
9795
if snap.MemSizeMib != memSizeMib || snap.VCPUCount != vCPUCount {
98-
return nil, errors.New("Please create a new revision when updating uVM memory size or vCPU count")
96+
return nil, errors.New("uVM memory size or vCPU count in the snapshot do not match the requested ones.")
97+
}
98+
99+
vmID := ""
100+
if c.isFullLocal {
101+
vmID = strconv.Itoa(int(atomic.AddUint64(&c.nextID, 1)))
99102
} else {
100-
vmID := ""
101-
if c.isDeduplicatedSnaps {
102-
vmID = strconv.Itoa(int(atomic.AddUint64(&c.nextID, 1)))
103-
} else {
104-
vmID = snap.GetId()
105-
}
106-
107-
return c.orchStartVMSnapshot(ctx, snap, memSizeMib, vCPUCount, vmID)
103+
vmID = snap.GetId()
108104
}
109-
} else {
110-
return c.orchStartVM(ctx, image, revision, memSizeMib, vCPUCount)
105+
106+
return c.orchStartVMSnapshot(ctx, snap, memSizeMib, vCPUCount, vmID)
107+
111108
}
112109
}
113110

@@ -134,7 +131,7 @@ func (c *coordinator) stopVM(ctx context.Context, containerID string) error {
134131
}
135132

136133
id := fi.vmID
137-
if c.isDeduplicatedSnaps {
134+
if c.isFullLocal {
138135
id = fi.revisionId
139136
}
140137

@@ -148,7 +145,7 @@ func (c *coordinator) stopVM(ctx context.Context, containerID string) error {
148145
}
149146
}
150147

151-
if c.isDeduplicatedSnaps {
148+
if c.isFullLocal {
152149
return c.orchStopVM(ctx, fi)
153150
} else {
154151
return c.orchOffloadVM(ctx, fi)
@@ -201,7 +198,7 @@ func (c *coordinator) orchStartVM(ctx context.Context, image, revision string, m
201198

202199
if !c.withoutOrchestrator {
203200
trackDirtyPages := c.isSparseSnaps
204-
resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image, memSizeMib, vCPUCount, trackDirtyPages)
201+
resp, _, err = c.orch.StartVM(ctxTimeout, vmID, image, memSizeMib, vCPUCount, trackDirtyPages, c.isFullLocal)
205202
if err != nil {
206203
logger.WithError(err).Error("coordinator failed to start VM")
207204
}
@@ -233,7 +230,7 @@ func (c *coordinator) orchStartVMSnapshot(ctx context.Context, snap *snapshottin
233230
ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*30)
234231
defer cancel()
235232

236-
resp, _, err = c.orch.LoadSnapshot(ctxTimeout, vmID, snap)
233+
resp, _, err = c.orch.LoadSnapshot(ctxTimeout, vmID, snap, c.isFullLocal)
237234
if err != nil {
238235
logger.WithError(err).Error("failed to load VM")
239236
return nil, err
@@ -260,20 +257,18 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance)
260257
)
261258

262259
id := fi.vmID
263-
if c.isDeduplicatedSnaps {
260+
if c.isFullLocal {
264261
id = fi.revisionId
265262
}
266263

267264
removeContainerSnaps, snap, err := c.snapshotManager.InitSnapshot(id, fi.image, fi.coldStartTimeMs, fi.memSizeMib, fi.vCPUCount, c.isSparseSnaps)
268265

269266
if err != nil {
270-
if fmt.Sprint(err) == "There is not enough free space available" {
271-
fi.logger.Info(fmt.Sprintf("There is not enough space available for snapshots of %s", fi.revisionId))
272-
}
267+
fi.logger.Warn(fmt.Sprint(err))
273268
return nil
274269
}
275270

276-
if c.isDeduplicatedSnaps && removeContainerSnaps != nil {
271+
if c.isFullLocal && removeContainerSnaps != nil {
277272
for _, cleanupSnapId := range *removeContainerSnaps {
278273
if err := c.orch.CleanupSnapshot(ctx, cleanupSnapId); err != nil {
279274
return errors.Wrap(err, "removing devmapper revision snapshot")
@@ -292,7 +287,7 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance)
292287
return nil
293288
}
294289

295-
err = c.orch.CreateSnapshot(ctxTimeout, fi.vmID, snap)
290+
err = c.orch.CreateSnapshot(ctxTimeout, fi.vmID, snap, c.isFullLocal)
296291
if err != nil {
297292
fi.logger.WithError(err).Error("failed to create snapshot")
298293
return nil
@@ -306,26 +301,26 @@ func (c *coordinator) orchCreateSnapshot(ctx context.Context, fi *FuncInstance)
306301
return nil
307302
}
308303

309-
func (c *coordinator) orchStopVM(ctx context.Context, fi *FuncInstance) error {
304+
func (c *coordinator) orchOffloadVM(ctx context.Context, fi *FuncInstance) error {
310305
if c.withoutOrchestrator {
311306
return nil
312307
}
313308

314-
if err := c.orch.StopSingleVM(ctx, fi.vmID); err != nil {
315-
fi.logger.WithError(err).Error("failed to stop VM for instance")
309+
if err := c.orch.OffloadVM(ctx, fi.vmID, c.isFullLocal); err != nil {
310+
fi.logger.WithError(err).Error("failed to offload VM")
316311
return err
317312
}
318313

319314
return nil
320315
}
321316

322-
func (c *coordinator) orchOffloadVM(ctx context.Context, fi *FuncInstance) error {
317+
func (c *coordinator) orchStopVM(ctx context.Context, fi *FuncInstance) error {
323318
if c.withoutOrchestrator {
324319
return nil
325320
}
326321

327-
if err := c.orch.OffloadVM(ctx, fi.vmID); err != nil {
328-
fi.logger.WithError(err).Error("failed to offload VM")
322+
if err := c.orch.StopSingleVM(ctx, fi.vmID, c.isFullLocal); err != nil {
323+
fi.logger.WithError(err).Error("failed to stop VM for instance")
329324
return err
330325
}
331326

cri/firecracker/service.go

Lines changed: 6 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ const (
4242
guestPortEnv = "GUEST_PORT"
4343
guestImageEnv = "GUEST_IMAGE"
4444
guestMemorySizeMibEnv = "MEM_SIZE_MB"
45-
guestvCPUCount = "VCPU_COUNT"
45+
guestvCPUCountEnv = "VCPU_COUNT"
4646
)
4747

4848
type FirecrackerService struct {
@@ -61,15 +61,15 @@ type VMConfig struct {
6161
guestPort string
6262
}
6363

64-
func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps, isDeduplicatedSnaps bool) (*FirecrackerService, error) {
64+
func NewFirecrackerService(orch *ctriface.Orchestrator, snapsCapacityMiB int64, isSparseSnaps, isFullLocal bool) (*FirecrackerService, error) {
6565
fs := new(FirecrackerService)
6666
stockRuntimeClient, err := cri.NewStockRuntimeServiceClient()
6767
if err != nil {
6868
log.WithError(err).Error("failed to create new stock runtime service client")
6969
return nil, err
7070
}
7171
fs.stockRuntimeClient = stockRuntimeClient
72-
fs.coordinator = newFirecrackerCoordinator(orch, snapsCapacityMiB, isSparseSnaps, isDeduplicatedSnaps)
72+
fs.coordinator = newFirecrackerCoordinator(orch, snapsCapacityMiB, isSparseSnaps, isFullLocal)
7373
fs.vmConfigs = make(map[string]*VMConfig)
7474
return fs, nil
7575
}
@@ -251,14 +251,13 @@ func getMemorySize(config *criapi.ContainerConfig) (uint32, error) {
251251
envs := config.GetEnvs()
252252
for _, kv := range envs {
253253
if kv.GetKey() == guestMemorySizeMibEnv {
254-
memSize, err := strconv.Atoi(kv.GetValue())
254+
memSize, err := strconv.ParseUint(kv.GetValue(), 10, 32)
255255
if err == nil {
256256
return uint32(memSize), nil
257257
} else {
258258
return 0, err
259259
}
260260
}
261-
262261
}
263262

264263
return uint32(256), nil
@@ -267,15 +266,14 @@ func getMemorySize(config *criapi.ContainerConfig) (uint32, error) {
267266
func getvCPUCount(config *criapi.ContainerConfig) (uint32, error) {
268267
envs := config.GetEnvs()
269268
for _, kv := range envs {
270-
if kv.GetKey() == guestvCPUCount {
271-
vCPUCount, err := strconv.Atoi(kv.GetValue())
269+
if kv.GetKey() == guestvCPUCountEnv {
270+
vCPUCount, err := strconv.ParseUint(kv.GetValue(), 10, 32)
272271
if err == nil {
273272
return uint32(vCPUCount), nil
274273
} else {
275274
return 0, err
276275
}
277276
}
278-
279277
}
280278

281279
return uint32(1), nil

ctriface/bench_test.go

Lines changed: 15 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@ package ctriface
2424

2525
import (
2626
"context"
27-
"github.com/ease-lab/vhive/ctriface/regular"
2827
"os"
2928
"os/exec"
3029
"path/filepath"
@@ -44,22 +43,6 @@ const (
4443
)
4544

4645
func TestBenchmarkStart(t *testing.T) {
47-
orch := NewOrchestrator(regular.NewRegOrchestrator(
48-
"devmapper",
49-
"",
50-
"fc-dev-thinpool",
51-
"",
52-
10,
53-
regular.WithTestModeOn(true),
54-
regular.WithUPF(*isUPFEnabled),
55-
))
56-
57-
benchCount := 10
58-
vmID := 0
59-
benchmarkStart(t, orch, benchCount, vmID)
60-
}
61-
62-
func benchmarkStart(t *testing.T, orch *Orchestrator, benchCount, vmID int) {
6346
log.SetFormatter(&log.TextFormatter{
6447
TimestampFormat: ctrdlog.RFC3339NanoFixed,
6548
FullTimestamp: true,
@@ -70,10 +53,22 @@ func benchmarkStart(t *testing.T, orch *Orchestrator, benchCount, vmID int) {
7053
log.SetLevel(log.InfoLevel)
7154

7255
testTimeout := 2000 * time.Second
73-
ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), regular.NamespaceName), testTimeout)
56+
ctx, cancel := context.WithTimeout(namespaces.WithNamespace(context.Background(), NamespaceName), testTimeout)
7457
defer cancel()
7558

59+
orch := NewOrchestrator(
60+
"devmapper",
61+
"",
62+
"fc-dev-thinpool",
63+
"",
64+
10,
65+
WithTestModeOn(true),
66+
WithUPF(*isUPFEnabled),
67+
)
68+
7669
images := getAllImages()
70+
benchCount := 10
71+
vmID := 0
7772

7873
createResultsDir()
7974

@@ -88,11 +83,11 @@ func benchmarkStart(t *testing.T, orch *Orchestrator, benchCount, vmID int) {
8883
for i := 0; i < benchCount; i++ {
8984
dropPageCache()
9085

91-
_, metric, err := orch.StartVM(ctx, vmIDString, imageName, 256, 1, false)
86+
_, metric, err := orch.StartVM(ctx, vmIDString, imageName, 256, 1, false, false)
9287
require.NoError(t, err, "Failed to start VM")
9388
startMetrics[i] = metric
9489

95-
err = orch.StopSingleVM(ctx, vmIDString)
90+
err = orch.StopSingleVM(ctx, vmIDString, false)
9691
require.NoError(t, err, "Failed to stop VM")
9792
}
9893

0 commit comments

Comments (0)