2 changes: 1 addition & 1 deletion pkg/disk/batcher/low_latency.go
@@ -170,7 +170,7 @@ func (w *LowLatency[T]) descBatch(logger logr.Logger, t time.Time, requests map[
}
// Not found
sendToAll(requests, getResponse[*T]{})
-logger.V(3).Info("got batch", "n", len(thisBatch),
+logger.V(2).Info("got batch", "n", len(thisBatch),
"requestID", resp.RequestID, "duration", w.clk.Since(t), "wait", t.Sub(firstTime))
}
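This change, like the one in pkg/disk/waitstatus/batched.go further down, only raises log visibility. As a reminder of how klog/logr verbosity gating works, here is a minimal self-contained sketch (not code from this PR; the -v value is an assumption): V(2) lines are emitted at -v=2 and above, so moving a message from V(3) or V(4) to V(2) makes it appear at more common verbosity settings.

```go
package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("v", "2") // hypothetical verbosity; normally set on the command line
	flag.Parse()

	logger := klog.Background()
	logger.V(2).Info("got batch", "n", 3)     // emitted at -v=2 and above
	logger.V(4).Info("starting poll", "n", 3) // suppressed until -v=4
}
```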

2 changes: 1 addition & 1 deletion pkg/disk/cloud.go
@@ -158,7 +158,7 @@ func (ad *DiskAttachDetach) findDevice(ctx context.Context, diskID, serial strin
// Returns device path if fromNode, disk serial number otherwise.
func (ad *DiskAttachDetach) attachDisk(ctx context.Context, diskID, nodeID string, fromNode bool) (string, error) {
logger := klog.FromContext(ctx)
logger.V(2).Info("Starting Do AttachDisk", "instanceID", nodeID, "region", GlobalConfigVar.Region)
logger.V(2).Info("Starting Do AttachDisk")

ecsClient := GlobalConfigVar.EcsClient
// Step 1: check disk status
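Dropping the explicit instanceID and region here reads as if the context logger already carries them; that is an assumption on my part, not something the diff shows. A hedged sketch of that pattern, with hypothetical values and a hypothetical WithValues call at the caller:

```go
package main

import (
	"context"
	"flag"

	"k8s.io/klog/v2"
)

func attachDisk(ctx context.Context) {
	// No explicit key/value pairs: they are inherited from the context logger.
	logger := klog.FromContext(ctx)
	logger.V(2).Info("Starting Do AttachDisk")
}

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("v", "2")
	flag.Parse()

	// Hypothetical values; in the driver they would come from the CSI request.
	ctx := klog.NewContext(context.Background(),
		klog.Background().WithValues("instanceID", "i-123", "region", "cn-hangzhou"))
	attachDisk(ctx)
}
```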
4 changes: 3 additions & 1 deletion pkg/disk/disk.go
@@ -246,10 +246,12 @@ func newBatcher(fromNode bool) (waitstatus.StatusWaiter[ecs.Disk], batcher.Batch
client := desc.Disk{Client: GlobalConfigVar.EcsClient}
ctx := context.Background()
interval := 1 * time.Second
+max := 2 * time.Second
if fromNode {
interval = 2 * time.Second // We have many nodes, use longer interval to avoid throttling
+max = 3 * time.Second
}
-waiter := waitstatus.NewBatched(client, clock.RealClock{}, interval, 3*time.Second)
+waiter := waitstatus.NewBatched(client, clock.RealClock{}, interval, max)
go waiter.Run(ctx)

b := batcher.NewLowLatency(client, clock.RealClock{}, 1*time.Second, 8)
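The new max value lets the waiter's maximum wait scale with the per-role poll interval: node plugins are numerous and share the ECS API quota, so they poll less aggressively, while the single controller favors lower latency. A minimal sketch, not code from this PR, of a hypothetical helper that keeps the two parameters together:

```go
package main

import (
	"fmt"
	"time"
)

// pollingParams is a hypothetical helper; the durations mirror the hunk above.
func pollingParams(fromNode bool) (interval, max time.Duration) {
	if fromNode {
		// Many nodes share the ECS API quota: poll less often, wait a bit longer.
		return 2 * time.Second, 3 * time.Second
	}
	// The single controller can poll faster for lower attach/detach latency.
	return 1 * time.Second, 2 * time.Second
}

func main() {
	interval, max := pollingParams(true)
	fmt.Println("node plugin polls every", interval, "with max wait", max)
}
```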
1 change: 1 addition & 0 deletions pkg/disk/utils.go
@@ -109,6 +109,7 @@ var ecsOpenAPITransport = http.Transport{
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100, // Set this equal to MaxIdleConns as we should only talk to one endpoint with this Transport instance.
+MaxConnsPerHost: 500, // Protect our backend. Should be large enough to handle any workload.
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
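For context, MaxConnsPerHost caps the total connections to one host (dialing, in-flight, and idle combined); when the cap is hit, additional requests block until a connection frees up, and 0 means unlimited. A small self-contained sketch of the same Transport tuning, against a hypothetical endpoint:

```go
package main

import (
	"net/http"
	"time"
)

func main() {
	transport := &http.Transport{
		ForceAttemptHTTP2:   true,
		MaxIdleConns:        100,
		MaxIdleConnsPerHost: 100, // single endpoint, so per-host idle cap == total idle cap
		MaxConnsPerHost:     500, // hard ceiling per host; 0 would mean unlimited
		IdleConnTimeout:     90 * time.Second,
	}
	client := &http.Client{Transport: transport, Timeout: 30 * time.Second}

	resp, err := client.Get("https://ecs.example.com/") // hypothetical endpoint
	if err == nil {
		resp.Body.Close()
	}
}
```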
2 changes: 1 addition & 1 deletion pkg/disk/waitstatus/batched.go
@@ -117,7 +117,7 @@ func (w *Batched[T]) Run(ctx context.Context) {
case r := <-w.feedback:
next := w.processFeedback(r)
w.idQueue = append(w.idQueue, next...)
logger.V(4).Info("poll response processed", "queueDepth", len(w.idQueue), "requeue", len(next))
logger.V(2).Info("poll response processed", "queueDepth", len(w.idQueue), "requeue", len(next))
case t := <-pollChan:
logger.V(4).Info("starting poll", "queueDepth", len(w.idQueue))
w.idQueue = w.poll(t, w.idQueue)
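For readers new to this file: Run is a single goroutine that owns the queue, merging requeued IDs from the feedback channel and kicking off a poll on a timer, which is why idQueue needs no locking. A simplified sketch with assumed types and names, not this repository's code:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// run owns idQueue; feedback delivers IDs that must be polled again.
func run(ctx context.Context, feedback <-chan []string, poll func([]string) []string) {
	var idQueue []string
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case requeue := <-feedback:
			// Disks whose status is not final yet go back into the queue.
			idQueue = append(idQueue, requeue...)
		case <-ticker.C:
			// One batched describe call; poll returns the IDs still pending.
			idQueue = poll(idQueue)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	run(ctx, make(chan []string), func(ids []string) []string {
		fmt.Println("polling", len(ids), "disks")
		return nil
	})
}
```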
6 changes: 4 additions & 2 deletions pkg/features/features.go
@@ -16,15 +16,17 @@ const (
DiskADController featuregate.Feature = "DiskADController"

// Attach multiple disks to the same node in parallel.
-// ECS don't allow parallel attach/detach to a node by default.
+// ECS doesn't allow parallel attach to a node by default.
// Enable this if you need faster attach, and only if your UID is whitelisted (by opening a ticket),
// or you have the supportConcurrencyAttach=true tag on your ECS instance.
//
// Only effective when DiskADController is also enabled.
DiskParallelAttach featuregate.Feature = "DiskParallelAttach"

// Detach multiple disks from the same node in parallel.
-// ECS does not allow parallel detach from a node currently. This feature gate is reserved for future use.
+// ECS does not allow parallel detach from a node by default.
+// Enable this if you need faster detach, and only if your UID is whitelisted (by opening a ticket),
+// or you have the supportConcurrencyDetach=true tag on your ECS instance.
//
// Only effective when DiskADController is also enabled.
DiskParallelDetach featuregate.Feature = "DiskParallelDetach"
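These gates follow the standard k8s.io/component-base/featuregate pattern. A hedged sketch of how such gates are typically registered and checked; the defaults, pre-release stages, and the Set string below are assumptions, not taken from this repository:

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const (
	DiskADController   featuregate.Feature = "DiskADController"
	DiskParallelAttach featuregate.Feature = "DiskParallelAttach"
)

func main() {
	gate := featuregate.NewFeatureGate()
	_ = gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		DiskADController:   {Default: false, PreRelease: featuregate.Alpha},
		DiskParallelAttach: {Default: false, PreRelease: featuregate.Alpha},
	})

	// Usually populated from a --feature-gates style flag.
	_ = gate.Set("DiskADController=true,DiskParallelAttach=true")

	if gate.Enabled(DiskADController) && gate.Enabled(DiskParallelAttach) {
		fmt.Println("parallel attach path enabled")
	}
}
```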