@@ -175,10 +175,27 @@ func NewManager(opts Options) (*Manager, error) {
 	}
 
 	for _, vol := range vols {
-		m.log.Info("Registering existing data directory for management", "volume", vol)
-		if err := m.ManageVolume(vol); err != nil {
-			return nil, fmt.Errorf("loading existing volume: %w", err)
+		log := m.log.WithValues("volume_id", vol)
+		meta, err := opts.MetadataReader.ReadMetadata(vol)
+		if err != nil {
+			// This implies something has modified the state store whilst we are starting up.
+			// Return the error and hope that next time we start up, nothing else changes the filesystem.
+			return nil, fmt.Errorf("reading existing volume metadata: %w", err)
+		}
+		if meta.NextIssuanceTime == nil {
+			// This implies that a successful issuance has never been completed for this volume.
+			// Don't register these volumes for management automatically, as they could be left over
+			// from a previous instance of the CSI driver handling a NodePublishVolume call that was
+			// not able to clean up the state store before an unexpected exit.
+			// Whatever is calling the CSI plugin should call NodePublishVolume again relatively soon
+			// after we start up, which will trigger management to resume.
+			// TODO: consider deleting the volume from the state store in these cases to avoid
+			// leaving behind metadata files for pods that no longer exist.
+			log.Info("Skipping management of volume that has never successfully completed issuance")
+			continue
 		}
+		log.Info("Registering existing data directory for management", "volume", vol)
+		m.ManageVolume(vol)
 	}
 
 	return m, nil
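For reference, below is a minimal standalone sketch of the startup filtering logic this change introduces. The Metadata struct, MetadataReader interface, and resumableVolumes helper here are illustrative assumptions rather than the library's actual API; the only behaviour taken from the diff is that a nil NextIssuanceTime marks a volume whose issuance has never completed, and that such volumes are skipped rather than re-registered.

package manager

import (
	"fmt"
	"time"
)

// Metadata is a stand-in for the state-store record (assumed shape).
// A nil NextIssuanceTime is taken to mean no issuance has ever
// completed for the volume.
type Metadata struct {
	NextIssuanceTime *time.Time
}

// MetadataReader is a hypothetical reader over the on-disk state store.
type MetadataReader interface {
	ReadMetadata(volumeID string) (Metadata, error)
}

// resumableVolumes returns only the volumes that should be re-registered
// for management on startup: those whose metadata records a completed
// issuance. Never-issued volumes are skipped, on the expectation that a
// fresh NodePublishVolume call will re-trigger management for any pod
// that still exists.
func resumableVolumes(reader MetadataReader, vols []string) ([]string, error) {
	var resume []string
	for _, vol := range vols {
		meta, err := reader.ReadMetadata(vol)
		if err != nil {
			// The state store changed underneath us; fail startup and
			// retry on the next run, mirroring the diff's behaviour.
			return nil, fmt.Errorf("reading existing volume metadata: %w", err)
		}
		if meta.NextIssuanceTime == nil {
			continue
		}
		resume = append(resume, vol)
	}
	return resume, nil
}

Factoring the decision out like this keeps the skip/resume rule unit-testable without a running Manager, though the diff itself applies it inline inside NewManager.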