@@ -28,6 +28,7 @@ import (
 	. "github.com/netapp/trident/logging"
 	"github.com/netapp/trident/pkg/collection"
 	"github.com/netapp/trident/pkg/convert"
+	"github.com/netapp/trident/pkg/locks"
 	sa "github.com/netapp/trident/storage_attribute"
 	"github.com/netapp/trident/utils"
 	"github.com/netapp/trident/utils/devices"
@@ -105,7 +106,7 @@ const (
 
 func attemptLock(ctx context.Context, lockContext, lockID string, lockTimeout time.Duration) bool {
 	startTime := time.Now()
-	utils.Lock(ctx, lockContext, lockID)
+	locks.Lock(ctx, lockContext, lockID)
 	// Fail if the gRPC call came in a long time ago to avoid kubelet 120s timeout
 	if time.Since(startTime) > lockTimeout {
 		Logc(ctx).Debugf("Request spent more than %v in the queue and timed out", csiNodeLockTimeout)
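
Note on the refactor: attemptLock blocks in locks.Lock and only afterwards checks how long the request waited, so the lock is held even when the function returns false; callers therefore register the unlock unconditionally (see the handlers below). For orientation, here is a minimal sketch of per-ID lock primitives with the same shape as the calls in this diff (Lock, Unlock, WaitQueueSize keyed by lockID). It is illustrative only and assumes a simple mutex-plus-wait-counter design, not necessarily what pkg/locks actually does.

	// Illustrative sketch only: assumes named locks with a wait counter.
	// The real github.com/netapp/trident/pkg/locks implementation may differ.
	package locks

	import (
		"context"
		"sync"
		"sync/atomic"
	)

	var (
		mapMutex sync.Mutex
		mutexes  = map[string]*sync.Mutex{}
		waiters  = map[string]*int64{}
	)

	// entry returns (creating if needed) the mutex and wait counter for lockID.
	func entry(lockID string) (*sync.Mutex, *int64) {
		mapMutex.Lock()
		defer mapMutex.Unlock()
		if _, ok := mutexes[lockID]; !ok {
			mutexes[lockID] = &sync.Mutex{}
			waiters[lockID] = new(int64)
		}
		return mutexes[lockID], waiters[lockID]
	}

	// Lock blocks until the lock named lockID is acquired; lockContext is only a label for logging.
	func Lock(_ context.Context, lockContext, lockID string) {
		m, w := entry(lockID)
		atomic.AddInt64(w, 1)
		m.Lock()
		atomic.AddInt64(w, -1)
	}

	// Unlock releases the lock named lockID.
	func Unlock(_ context.Context, lockContext, lockID string) {
		m, _ := entry(lockID)
		m.Unlock()
	}

	// WaitQueueSize reports how many callers are currently blocked in Lock for lockID.
	func WaitQueueSize(lockID string) int64 {
		_, w := entry(lockID)
		return atomic.LoadInt64(w)
	}
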
@@ -127,7 +128,7 @@ func (p *Plugin) NodeStageVolume(
 	defer Logc(ctx).WithFields(fields).Debug("<<<< NodeStageVolume")
 
 	lockContext := "NodeStageVolume"
-	defer utils.Unlock(ctx, lockContext, req.GetVolumeId())
+	defer locks.Unlock(ctx, lockContext, req.GetVolumeId())
 	if !attemptLock(ctx, lockContext, req.GetVolumeId(), csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
 	}
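
The ordering in these handlers is deliberate: the deferred locks.Unlock is registered before attemptLock because attemptLock always ends up holding the lock (it only reports whether the wait exceeded csiNodeLockTimeout), so the deferred unlock must cover the timeout path as well as the normal path. A condensed sketch of the pattern, using a hypothetical handler name and the identifiers already imported by this file:

	// Hypothetical handler illustrating the serialization pattern used by the
	// Node* RPCs in this diff; volumeID and csiNodeLockTimeout follow the surrounding file.
	func (p *Plugin) nodeDoWork(ctx context.Context, volumeID string) error {
		lockContext := "NodeDoWork"

		// Register the unlock before attempting the lock: attemptLock acquires the
		// lock even when it reports a timeout, so it must be released on every path.
		defer locks.Unlock(ctx, lockContext, volumeID)
		if !attemptLock(ctx, lockContext, volumeID, csiNodeLockTimeout) {
			return status.Error(codes.Aborted, "request waited too long for the lock")
		}

		// ... do the per-volume work while the lock is held ...
		return nil
	}
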
@@ -181,7 +182,7 @@ func (p *Plugin) nodeUnstageVolume(
 	defer Logc(ctx).WithFields(fields).Debug("<<<< NodeUnstageVolume")
 
 	lockContext := "NodeUnstageVolume"
-	defer utils.Unlock(ctx, lockContext, req.GetVolumeId())
+	defer locks.Unlock(ctx, lockContext, req.GetVolumeId())
 	if !attemptLock(ctx, lockContext, req.GetVolumeId(), csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
 	}
@@ -269,7 +270,7 @@ func (p *Plugin) NodePublishVolume(
 	defer Logc(ctx).WithFields(fields).Debug("<<<< NodePublishVolume")
 
 	lockContext := "NodePublishVolume"
-	defer utils.Unlock(ctx, lockContext, req.GetVolumeId())
+	defer locks.Unlock(ctx, lockContext, req.GetVolumeId())
 	if !attemptLock(ctx, lockContext, req.GetVolumeId(), csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
 	}
@@ -326,7 +327,7 @@ func (p *Plugin) NodeUnpublishVolume(
 	defer Logc(ctx).WithFields(fields).Debug("<<<< NodeUnpublishVolume")
 
 	lockContext := "NodeUnpublishVolume"
-	defer utils.Unlock(ctx, lockContext, req.GetVolumeId())
+	defer locks.Unlock(ctx, lockContext, req.GetVolumeId())
 	if !attemptLock(ctx, lockContext, req.GetVolumeId(), csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
 	}
@@ -1553,7 +1554,7 @@ func (p *Plugin) nodeUnstageFCPVolumeRetry(
 ) (*csi.NodeUnstageVolumeResponse, error) {
 	// Serializing all the parallel requests by relying on the constant var.
 	lockContext := "NodeUnstageFCPVolume-" + req.GetVolumeId()
-	defer utils.Unlock(ctx, lockContext, nodeLockID)
+	defer locks.Unlock(ctx, lockContext, nodeLockID)
 
 	if !attemptLock(ctx, lockContext, nodeLockID, csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
@@ -1588,7 +1589,7 @@ func (p *Plugin) nodePublishFCPVolume(
 ) (*csi.NodePublishVolumeResponse, error) {
 	// Serializing all the parallel requests by relying on the constant var.
 	lockContext := "NodePublishFCPVolume-" + req.GetVolumeId()
-	defer utils.Unlock(ctx, lockContext, nodeLockID)
+	defer locks.Unlock(ctx, lockContext, nodeLockID)
 
 	if !attemptLock(ctx, lockContext, nodeLockID, csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
@@ -2090,7 +2091,7 @@ func (p *Plugin) nodeUnstageISCSIVolumeRetry(
 ) (*csi.NodeUnstageVolumeResponse, error) {
 	// Serializing all the parallel requests by relying on the constant var.
 	lockContext := "NodeUnstageISCSIVolume-" + req.GetVolumeId()
-	defer utils.Unlock(ctx, lockContext, nodeLockID)
+	defer locks.Unlock(ctx, lockContext, nodeLockID)
 
 	if !attemptLock(ctx, lockContext, nodeLockID, csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
@@ -2125,7 +2126,7 @@ func (p *Plugin) nodePublishISCSIVolume(
 ) (*csi.NodePublishVolumeResponse, error) {
 	// Serializing all the parallel requests by relying on the constant var.
 	lockContext := "NodePublishISCSIVolume-" + req.GetVolumeId()
-	defer utils.Unlock(ctx, lockContext, nodeLockID)
+	defer locks.Unlock(ctx, lockContext, nodeLockID)
 
 	if !attemptLock(ctx, lockContext, nodeLockID, csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
@@ -2655,8 +2656,8 @@ func (p *Plugin) updateCHAPInfoForSessions(
 // performISCSISelfHealing inspects the desired state of the iSCSI sessions with the current state and accordingly
 // identifies candidate sessions that require remediation. This function is invoked periodically.
 func (p *Plugin) performISCSISelfHealing(ctx context.Context) {
-	utils.Lock(ctx, iSCSISelfHealingLockContext, nodeLockID)
-	defer utils.Unlock(ctx, iSCSISelfHealingLockContext, nodeLockID)
+	locks.Lock(ctx, iSCSISelfHealingLockContext, nodeLockID)
+	defer locks.Unlock(ctx, iSCSISelfHealingLockContext, nodeLockID)
 
 	defer func() {
 		if r := recover(); r != nil {
@@ -2742,7 +2743,7 @@ func (p *Plugin) fixISCSISessions(ctx context.Context, portals []string, portalT
 
 		// Check if there is a need to stop the loop from running
 		// NOTE: The loop should run at least once for all portal types.
-		if idx > 0 && utils.WaitQueueSize(nodeLockID) > 0 {
+		if idx > 0 && locks.WaitQueueSize(nodeLockID) > 0 {
 			// Check to see if some other operation(s) requires node lock, if not then continue to resolve
 			// non-stale iSCSI portal issues else break out of this loop.
 			if isNonStaleSessionFix {
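
Both self-healing paths use WaitQueueSize as a cooperative yield: a long pass that holds nodeLockID checks whether any other node operation has queued up before starting the next iteration, and bails out early if so. A condensed, illustrative sketch of that loop shape (the real functions layer on per-portal-type and time-budget conditions):

	// Sketch of the cooperative preemption used by the self-healing loops: always
	// run at least one iteration, then yield the node lock if anyone is waiting.
	for idx, portal := range portals {
		if idx > 0 && locks.WaitQueueSize(nodeLockID) > 0 {
			Logc(ctx).Debug("Other node operations are waiting; stopping self-healing early.")
			break
		}
		healPortal(ctx, portal) // hypothetical helper standing in for the real remediation
	}
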
@@ -2913,7 +2914,7 @@ func (p *Plugin) nodeUnstageNVMeVolume(
 ) (*csi.NodeUnstageVolumeResponse, error) {
 	// Serializing all the parallel requests by relying on the constant var.
 	lockContext := "NodeUnstageNVMeVolume-" + req.GetVolumeId()
-	defer utils.Unlock(ctx, lockContext, nodeLockID)
+	defer locks.Unlock(ctx, lockContext, nodeLockID)
 
 	if !attemptLock(ctx, lockContext, nodeLockID, csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
@@ -3057,7 +3058,7 @@ func (p *Plugin) nodePublishNVMeVolume(
 ) (*csi.NodePublishVolumeResponse, error) {
 	// Serializing all the parallel requests by relying on the constant var.
 	lockContext := "NodePublishNVMeVolume-" + req.GetVolumeId()
-	defer utils.Unlock(ctx, lockContext, nodeLockID)
+	defer locks.Unlock(ctx, lockContext, nodeLockID)
 
 	if !attemptLock(ctx, lockContext, nodeLockID, csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
@@ -3128,7 +3129,7 @@ func (p *Plugin) nodeStageSANVolume(
 ) (*csi.NodeStageVolumeResponse, error) {
 	// Serializing all the parallel requests by relying on the constant var.
 	lockContext := "NodeStageSanVolume-" + req.GetVolumeId()
-	defer utils.Unlock(ctx, lockContext, nodeLockID)
+	defer locks.Unlock(ctx, lockContext, nodeLockID)
 
 	if !attemptLock(ctx, lockContext, nodeLockID, csiNodeLockTimeout) {
 		return nil, status.Error(codes.Aborted, "request waited too long for the lock")
@@ -3190,8 +3191,8 @@ func (p *Plugin) nodeStageSANVolume(
 // performNVMeSelfHealing inspects the desired state of the NVMe sessions with the current state and accordingly
 // identifies candidate sessions that require remediation. This function is invoked periodically.
 func (p *Plugin) performNVMeSelfHealing(ctx context.Context) {
-	utils.Lock(ctx, nvmeSelfHealingLockContext, nodeLockID)
-	defer utils.Unlock(ctx, nvmeSelfHealingLockContext, nodeLockID)
+	locks.Lock(ctx, nvmeSelfHealingLockContext, nodeLockID)
+	defer locks.Unlock(ctx, nvmeSelfHealingLockContext, nodeLockID)
 
 	defer func() {
 		if r := recover(); r != nil {
@@ -3240,7 +3241,7 @@ func (p *Plugin) fixNVMeSessions(ctx context.Context, stopAt time.Time, subsyste
 		// 1. We should fix at least one subsystem in a single self-healing thread.
 		// 2. If there's another thread waiting for the node lock and if we have exceeded our 60 secs lock, we should
 		// stop NVMe self-healing.
-		if index > 0 && utils.WaitQueueSize(nodeLockID) > 0 && time.Now().After(stopAt) {
+		if index > 0 && locks.WaitQueueSize(nodeLockID) > 0 && time.Now().After(stopAt) {
			Logc(ctx).Info("Self-healing has exceeded maximum runtime; preempting NVMe session self-healing.")
			break
		}