Skip to content

Commit b4989cf

Browse files
authored
Cherry pick blkid luks fix
Cherry-pick blkid LUKS fix from master. Fixes an intermittent issue where blkid misidentifies the filesystem type for LUKS devices, preventing new volume creations.
1 parent c8e30b9 commit b4989cf

File tree

21 files changed

+1720
-1478
lines changed

21 files changed

+1720
-1478
lines changed

core/orchestrator_core.go

Lines changed: 46 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,10 @@ import (
3838
drivers "github.com/netapp/trident/storage_drivers"
3939
"github.com/netapp/trident/storage_drivers/fake"
4040
"github.com/netapp/trident/utils/autogrow"
41+
"github.com/netapp/trident/utils/devices"
42+
"github.com/netapp/trident/utils/devices/luks"
4143
"github.com/netapp/trident/utils/errors"
44+
"github.com/netapp/trident/utils/exec"
4245
"github.com/netapp/trident/utils/fcp"
4346
"github.com/netapp/trident/utils/filesystem"
4447
"github.com/netapp/trident/utils/iscsi"
@@ -4076,13 +4079,52 @@ func (o *TridentOrchestrator) AttachVolume(
40764079
var err error
40774080
if publishInfo.SANType == sa.NVMe {
40784081
nvmeHandler := nvme.NewNVMeHandler()
4079-
err = nvmeHandler.AttachNVMeVolumeRetry(ctx, volumeName, mountpoint, publishInfo, map[string]string{},
4080-
nvme.NVMeAttachTimeout)
4082+
err = nvmeHandler.AttachNVMeVolumeRetry(ctx, publishInfo, nvme.NVMeAttachTimeout)
4083+
if err != nil {
4084+
return err
4085+
}
4086+
4087+
// Cryptsetup format if necessary and map to host
4088+
var luksFormatted bool
4089+
var safeToFsFormat bool
4090+
luksFormatted, safeToFsFormat, err = nvmeHandler.EnsureCryptsetupFormattedAndMappedOnHost(
4091+
ctx, volumeName, publishInfo, map[string]string{},
4092+
)
4093+
if err != nil {
4094+
return err
4095+
}
4096+
4097+
// Format and mount if necessary
4098+
if err = nvmeHandler.EnsureVolumeFormattedAndMounted(
4099+
ctx, volumeName, mountpoint, publishInfo, luksFormatted, safeToFsFormat,
4100+
); err != nil {
4101+
return err
4102+
}
40814103
}
40824104

40834105
if publishInfo.SANType == sa.ISCSI {
4084-
_, err = o.iscsi.AttachVolumeRetry(ctx, volumeName, mountpoint, publishInfo, map[string]string{},
4085-
AttachISCSIVolumeTimeoutLong)
4106+
_, err = o.iscsi.AttachVolumeRetry(ctx, publishInfo, AttachISCSIVolumeTimeoutLong)
4107+
if err != nil {
4108+
return err
4109+
}
4110+
4111+
// Cryptsetup format if necessary and map to host
4112+
command := exec.NewCommand()
4113+
var luksFormatted bool
4114+
var safeToFsFormat bool
4115+
luksFormatted, safeToFsFormat, err = luks.EnsureCryptsetupFormattedAndMappedOnHost(
4116+
ctx, volumeName, publishInfo, map[string]string{}, command, devices.New(),
4117+
)
4118+
if err != nil {
4119+
return err
4120+
}
4121+
4122+
// Format and mount if necessary
4123+
if err = o.iscsi.EnsureVolumeFormattedAndMounted(
4124+
ctx, volumeName, mountpoint, publishInfo, luksFormatted, safeToFsFormat,
4125+
); err != nil {
4126+
return err
4127+
}
40864128
}
40874129
return err
40884130
}

frontend/csi/node_server.go

Lines changed: 63 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1400,17 +1400,33 @@ func (p *Plugin) nodeStageFCPVolume(
14001400
}()
14011401

14021402
var mpathSize int64
1403-
mpathSize, err = p.ensureAttachFCPVolume(ctx, req, "", publishInfo, AttachFCPVolumeTimeoutShort)
1403+
// Attach the volume to the node
1404+
mpathSize, err = p.ensureAttachFCPVolume(ctx, publishInfo, AttachFCPVolumeTimeoutShort)
14041405
if err != nil {
14051406
return err
14061407
}
14071408

1409+
// Cryptsetup format if necessary and map to host
1410+
luksFormatted, safeToFsFormat, err := luks.EnsureCryptsetupFormattedAndMappedOnHost(
1411+
ctx, req.VolumeContext["internalName"], publishInfo, req.GetSecrets(), p.command, p.devices,
1412+
)
1413+
if err != nil {
1414+
return err
1415+
}
1416+
1417+
// Format and mount if necessary
1418+
if err = p.fcp.EnsureVolumeFormattedAndMounted(
1419+
ctx, req.VolumeContext["internalName"], "", publishInfo, luksFormatted, safeToFsFormat,
1420+
); err != nil {
1421+
return err
1422+
}
1423+
14081424
if isLUKS {
14091425
if err = betweenAttachAndLUKSPassphrase.Inject(); err != nil {
14101426
return err
14111427
}
14121428

1413-
luksDevice := luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command)
1429+
luksDevice := luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command, p.devices)
14141430

14151431
// Ensure we update the passphrase in case it has never been set before
14161432
err = ensureLUKSVolumePassphrase(ctx, p.restClient, luksDevice, volumeId, req.GetSecrets(), true)
@@ -1438,8 +1454,7 @@ func (p *Plugin) nodeStageFCPVolume(
14381454
// ensureAttachFCPVolume attempts to attach the volume to the local host
14391455
// with a retry logic based on the publish information passed in.
14401456
func (p *Plugin) ensureAttachFCPVolume(
1441-
ctx context.Context, req *csi.NodeStageVolumeRequest, mountpoint string,
1442-
publishInfo *models.VolumePublishInfo, attachTimeout time.Duration,
1457+
ctx context.Context, publishInfo *models.VolumePublishInfo, attachTimeout time.Duration,
14431458
) (int64, error) {
14441459
var err error
14451460
var mpathSize int64
@@ -1448,8 +1463,7 @@ func (p *Plugin) ensureAttachFCPVolume(
14481463
defer Logc(ctx).Debug("<<<< ensureAttachFCPVolume")
14491464

14501465
// Perform the login/rescan/discovery/(optionally)format, mount & get the device back in the publish info
1451-
if mpathSize, err = p.fcp.AttachVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint,
1452-
publishInfo, req.GetSecrets(), attachTimeout); err != nil {
1466+
if mpathSize, err = p.fcp.AttachVolumeRetry(ctx, publishInfo, attachTimeout); err != nil {
14531467
return mpathSize, status.Error(codes.Internal, fmt.Sprintf("failed to stage volume: %v", err))
14541468
}
14551469

@@ -1697,13 +1711,13 @@ func (p *Plugin) nodePublishFCPVolume(
16971711
if luks.IsLegacyDevicePath(devicePath) {
16981712
// Supports legacy volumes that store the LUKS device path
16991713
luksDevice, err = luks.NewDeviceFromMappingPath(
1700-
ctx, p.command, devicePath, req.VolumeContext["internalName"],
1714+
ctx, p.command, p.devices, devicePath, req.VolumeContext["internalName"],
17011715
)
17021716
if err != nil {
17031717
return nil, status.Error(codes.Internal, err.Error())
17041718
}
17051719
} else {
1706-
luksDevice = luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command)
1720+
luksDevice = luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command, p.devices)
17071721
}
17081722

17091723
err = ensureLUKSVolumePassphrase(ctx, p.restClient, luksDevice, req.GetVolumeId(), req.GetSecrets(), false)
@@ -1864,17 +1878,33 @@ func (p *Plugin) nodeStageISCSIVolume(
18641878
}()
18651879

18661880
var mpathSize int64
1867-
mpathSize, err = p.ensureAttachISCSIVolume(ctx, req, "", publishInfo, AttachISCSIVolumeTimeoutShort)
1881+
// Attach the volume to the node
1882+
mpathSize, err = p.ensureAttachISCSIVolume(ctx, req, publishInfo, AttachISCSIVolumeTimeoutShort)
18681883
if err != nil {
18691884
return err
18701885
}
18711886

1887+
// Cryptsetup format if necessary and map to host
1888+
luksFormatted, safeToFsFormat, err := luks.EnsureCryptsetupFormattedAndMappedOnHost(
1889+
ctx, req.VolumeContext["internalName"], publishInfo, req.GetSecrets(), p.command, p.devices,
1890+
)
1891+
if err != nil {
1892+
return err
1893+
}
1894+
1895+
// Format and mount if necessary
1896+
if err = p.iscsi.EnsureVolumeFormattedAndMounted(
1897+
ctx, req.VolumeContext["internalName"], "", publishInfo, luksFormatted, safeToFsFormat,
1898+
); err != nil {
1899+
return err
1900+
}
1901+
18721902
if isLUKS {
18731903
if err = betweenAttachAndLUKSPassphrase.Inject(); err != nil {
18741904
return err
18751905
}
18761906

1877-
luksDevice := luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command)
1907+
luksDevice := luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command, p.devices)
18781908

18791909
// Ensure we update the passphrase in case it has never been set before
18801910
err = ensureLUKSVolumePassphrase(ctx, p.restClient, luksDevice, volumeId, req.GetSecrets(), true)
@@ -1918,25 +1948,22 @@ func (p *Plugin) nodeStageISCSIVolume(
19181948
// ensureAttachISCSIVolume attempts to attach the volume to the local host
19191949
// with a retry logic based on the publish information passed in.
19201950
func (p *Plugin) ensureAttachISCSIVolume(
1921-
ctx context.Context, req *csi.NodeStageVolumeRequest, mountpoint string,
1951+
ctx context.Context, req *csi.NodeStageVolumeRequest,
19221952
publishInfo *models.VolumePublishInfo, attachTimeout time.Duration,
19231953
) (int64, error) {
19241954
var err error
19251955
var mpathSize int64
19261956

19271957
// Perform the login/rescan/discovery/(optionally)format, mount & get the device back in the publish info
1928-
if mpathSize, err = p.iscsi.AttachVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint,
1929-
publishInfo, req.GetSecrets(), attachTimeout); err != nil {
1958+
if mpathSize, err = p.iscsi.AttachVolumeRetry(ctx, publishInfo, attachTimeout); err != nil {
19301959
// Did we fail to log in?
19311960
if errors.IsAuthError(err) {
19321961
// Update CHAP info from the controller and try one more time.
19331962
Logc(ctx).Warn("iSCSI login failed; will retrieve CHAP credentials from Trident controller and try again.")
19341963
if err = p.updateChapInfoFromController(ctx, req, publishInfo); err != nil {
19351964
return mpathSize, status.Error(codes.Internal, err.Error())
19361965
}
1937-
if mpathSize, err = p.iscsi.AttachVolumeRetry(ctx, req.VolumeContext["internalName"], mountpoint,
1938-
publishInfo,
1939-
req.GetSecrets(), attachTimeout); err != nil {
1966+
if mpathSize, err = p.iscsi.AttachVolumeRetry(ctx, publishInfo, attachTimeout); err != nil {
19401967
// Bail out no matter what as we've now tried with updated credentials
19411968
return mpathSize, status.Error(codes.Internal, err.Error())
19421969
}
@@ -2303,10 +2330,10 @@ func (p *Plugin) nodePublishISCSIVolume(
23032330
var err error
23042331
if luks.IsLegacyDevicePath(devicePath) {
23052332
// Supports legacy volumes that store the LUKS device path
2306-
luksDevice, err = luks.NewDeviceFromMappingPath(ctx, p.command, devicePath,
2333+
luksDevice, err = luks.NewDeviceFromMappingPath(ctx, p.command, p.devices, devicePath,
23072334
req.VolumeContext["internalName"])
23082335
} else {
2309-
luksDevice = luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command)
2336+
luksDevice = luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command, p.devices)
23102337
}
23112338

23122339
if err != nil {
@@ -2669,7 +2696,7 @@ func (p *Plugin) selfHealingRectifySession(ctx context.Context, portal string, a
26692696
}
26702697

26712698
publishedCHAPCredentials := publishInfo.IscsiChapInfo
2672-
if _, err = p.ensureAttachISCSIVolume(ctx, req, "", publishInfo, iSCSILoginTimeout); err != nil {
2699+
if _, err = p.ensureAttachISCSIVolume(ctx, req, publishInfo, iSCSILoginTimeout); err != nil {
26732700
return fmt.Errorf("failed to login to the target")
26742701
}
26752702

@@ -3014,13 +3041,26 @@ func (p *Plugin) nodeStageNVMeVolume(
30143041
publishInfo.SANType = req.PublishContext["SANType"]
30153042
publishInfo.FormatOptions = req.PublishContext["formatOptions"]
30163043

3017-
err := p.nvmeHandler.AttachNVMeVolumeRetry(
3018-
ctx, req.VolumeContext["internalName"], "", publishInfo, req.GetSecrets(), nvme.NVMeAttachTimeout,
3044+
err := p.nvmeHandler.AttachNVMeVolumeRetry(ctx, publishInfo, nvme.NVMeAttachTimeout)
3045+
if err != nil {
3046+
return err
3047+
}
3048+
3049+
// Cryptsetup format if necessary and map to host
3050+
luksFormatted, safeToFormat, err := p.nvmeHandler.EnsureCryptsetupFormattedAndMappedOnHost(
3051+
ctx, req.VolumeContext["internalName"], publishInfo, req.GetSecrets(),
30193052
)
30203053
if err != nil {
30213054
return err
30223055
}
30233056

3057+
// Format and mount if necessary
3058+
if err = p.nvmeHandler.EnsureVolumeFormattedAndMounted(
3059+
ctx, req.VolumeContext["internalName"], "", publishInfo, luksFormatted, safeToFormat,
3060+
); err != nil {
3061+
return err
3062+
}
3063+
30243064
volumeId, stagingTargetPath, err := p.getVolumeIdAndStagingPath(req)
30253065
if err != nil {
30263066
return err
@@ -3031,7 +3071,7 @@ func (p *Plugin) nodeStageNVMeVolume(
30313071
return err
30323072
}
30333073

3034-
luksDevice := luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command)
3074+
luksDevice := luks.NewDevice(publishInfo.DevicePath, req.VolumeContext["internalName"], p.command, p.devices)
30353075

30363076
// Ensure we update the passphrase in case it has never been set before
30373077
err = ensureLUKSVolumePassphrase(ctx, p.restClient, luksDevice, volumeId, req.GetSecrets(), true)
@@ -3259,7 +3299,7 @@ func (p *Plugin) nodePublishNVMeVolume(
32593299
devicePath := publishInfo.DevicePath
32603300
if convert.ToBool(publishInfo.LUKSEncryption) {
32613301
// Rotate the LUKS passphrase if needed, on failure, log and continue to publish
3262-
luksDevice := luks.NewDevice(devicePath, req.VolumeContext["internalName"], p.command)
3302+
luksDevice := luks.NewDevice(devicePath, req.VolumeContext["internalName"], p.command, p.devices)
32633303

32643304
err = ensureLUKSVolumePassphrase(ctx, p.restClient, luksDevice, req.GetVolumeId(), req.GetSecrets(), false)
32653305
if err != nil {

0 commit comments

Comments
 (0)