diff --git a/charts/aws-efs-csi-driver/templates/controller-deployment.yaml b/charts/aws-efs-csi-driver/templates/controller-deployment.yaml
index 4753b16fa..d699da78d 100644
--- a/charts/aws-efs-csi-driver/templates/controller-deployment.yaml
+++ b/charts/aws-efs-csi-driver/templates/controller-deployment.yaml
@@ -79,6 +79,7 @@ spec:
{{- end }}
- --v={{ .Values.controller.logLevel }}
- --delete-access-point-root-dir={{ hasKey .Values.controller "deleteAccessPointRootDir" | ternary .Values.controller.deleteAccessPointRootDir false }}
+ - --delete-provisioned-dir={{ hasKey .Values.controller "deleteProvisionedDir" | ternary .Values.controller.deleteProvisionedDir false }}
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
diff --git a/charts/aws-efs-csi-driver/values.yaml b/charts/aws-efs-csi-driver/values.yaml
index 89c637a67..4f6140fb0 100644
--- a/charts/aws-efs-csi-driver/values.yaml
+++ b/charts/aws-efs-csi-driver/values.yaml
@@ -65,8 +65,10 @@ controller:
# environment: prod
# region: us-east-1
# Enable if you want the controller to also delete the
- # path on efs when deleteing an access point
+ # path on efs when deleting an EFS access point
deleteAccessPointRootDir: false
+  # Enable if you want the controller to also delete the provisioned directory and its contents when deleting the volume
+ deleteProvisionedDir: false
podAnnotations: {}
podLabels: {}
hostNetwork: false
diff --git a/cmd/main.go b/cmd/main.go
index 98e195a62..be97b0e35 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -41,6 +41,8 @@ func main() {
volMetricsFsRateLimit = flag.Int("vol-metrics-fs-rate-limit", 5, "Volume metrics routines rate limiter per file system")
deleteAccessPointRootDir = flag.Bool("delete-access-point-root-dir", false,
"Opt in to delete access point root directory by DeleteVolume. By default, DeleteVolume will delete the access point behind Persistent Volume and deleting access point will not delete the access point root directory or its contents.")
+ deleteProvisionedDir = flag.Bool("delete-provisioned-dir", false,
+ "Opt in to delete any provisioned directories and their contents. By default, DeleteVolume will not delete the directory behind Persistent Volume. Note: If root squash is enabled on your EFS it's possible this option will not work.")
adaptiveRetryMode = flag.Bool("adaptive-retry-mode", true, "Opt out to use standard sdk retry configuration. By default, adaptive retry mode will be used to more heavily client side rate limit EFS API requests.")
tags = flag.String("tags", "", "Space separated key:value pairs which will be added as tags for EFS resources. For example, 'environment:prod region:us-east-1'")
)
@@ -61,7 +63,7 @@ func main() {
if err != nil {
klog.Fatalln(err)
}
- drv := driver.NewDriver(*endpoint, etcAmazonEfs, *efsUtilsStaticFilesPath, *tags, *volMetricsOptIn, *volMetricsRefreshPeriod, *volMetricsFsRateLimit, *deleteAccessPointRootDir, *adaptiveRetryMode)
+ drv := driver.NewDriver(*endpoint, etcAmazonEfs, *efsUtilsStaticFilesPath, *tags, *volMetricsOptIn, *volMetricsRefreshPeriod, *volMetricsFsRateLimit, *deleteAccessPointRootDir, *adaptiveRetryMode, *deleteProvisionedDir)
if err := drv.Run(); err != nil {
klog.Fatalln(err)
}
diff --git a/docs/README.md b/docs/README.md
index 9307c35c9..be3ef31a4 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -25,22 +25,27 @@ The following CSI interfaces are implemented:
* Identity Service: GetPluginInfo, GetPluginCapabilities, Probe
### Storage Class Parameters for Dynamic Provisioning
-| Parameters | Values | Default | Optional | Description |
-|-----------------------|--------|-----------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| provisioningMode | efs-ap | | false | Type of volume provisioned by efs. Currently, Access Points are supported. |
-| fileSystemId | | | false | File System under which access points are created. |
-| directoryPerms | | | false | Directory permissions for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. |
-| uid | | | true | POSIX user Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. |
-| gid | | | true | POSIX group Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. |
-| gidRangeStart | | 50000 | true | Start range of the POSIX group Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. Not used if uid/gid is set. |
-| gidRangeEnd | | 7000000 | true | End range of the POSIX group Id. Not used if uid/gid is set. |
-| basePath | | | true | Path under which access points for dynamic provisioning is created. If this parameter is not specified, access points are created under the root directory of the file system |
-| subPathPattern | | `/${.PV.name}` | true | The template used to construct the subPath under which each of the access points created under Dynamic Provisioning. Can be made up of fixed strings and limited variables, is akin to the 'subPathPattern' variable on the [nfs-subdir-external-provisioner](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner) chart. Supports `.PVC.name`,`.PVC.namespace` and `.PV.name` |
-| ensureUniqueDirectory | | true | true | **NOTE: Only set this to false if you're sure this is the behaviour you want**. Used when dynamic provisioning is enabled, if set to true, appends the a UID to the pattern specified in `subPathPattern` to ensure that access points will not accidentally point at the same directory. |
-| az | | "" | true | Used for cross-account mount. `az` under storage class parameter is optional. If specified, mount target associated with the az will be used for cross-account mount. If not specified, a random mount target will be picked for cross account mount |
-| reuseAccessPoint | | false | true | When set to true, it creates the Access Point client-token from the provided PVC name. So that the AccessPoint can be replicated from a different cluster if same PVC name and storageclass configuration are used. |
+| Parameters | Values | Default | Optional | Description |
+|-----------------------|----------------|-----------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| provisioningMode | efs-ap/efs-dir | | false | Type of volume provisioned by efs. `efs-ap` will provision EFS Access Points, while `efs-dir` will provision directories on the EFS instead. |
+| fileSystemId | | | false | File System under which access points are created. |
+| directoryPerms | | | false | Directory permissions for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. |
+| uid | | | true | POSIX user Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. |
+| gid | | | true | POSIX group Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. |
+| gidRangeStart | | 50000 | true | Start range of the POSIX group Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. Not used if uid/gid is set. Used only for provisioningMode "efs-ap". |
+| gidRangeEnd | | 7000000 | true | End range of the POSIX group Id. Not used if uid/gid is set. Used only for provisioningMode "efs-ap". |
+| basePath | | | true | Path under which access points for dynamic provisioning is created. If this parameter is not specified, access points are created under the root directory of the file system |
+| subPathPattern | | `/${.PV.name}` | true | The template used to construct the subPath under which each of the access points created under Dynamic Provisioning. Can be made up of fixed strings and limited variables, is akin to the 'subPathPattern' variable on the [nfs-subdir-external-provisioner](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner) chart. Supports `.PVC.name`,`.PVC.namespace` and `.PV.name`. Used only for provisioningMode "efs-ap". |
+| ensureUniqueDirectory | | true | true | **NOTE: Only set this to false if you're sure this is the behaviour you want**. Used when dynamic provisioning is enabled, if set to true, appends a UID to the pattern specified in `subPathPattern` to ensure that access points will not accidentally point at the same directory. Used only for provisioningMode "efs-ap". |
+| az | | "" | true | Used for cross-account mount. `az` under storage class parameter is optional. If specified, mount target associated with the az will be used for cross-account mount. If not specified, a random mount target will be picked for cross account mount |
+| reuseAccessPoint | | false | true | When set to true, it creates the Access Point client-token from the provided PVC name. So that the AccessPoint can be replicated from a different cluster if same PVC name and storageclass configuration are used. Used only for provisioningMode "efs-ap". |
**Note**
+* We suggest the following settings for each provisioning mode:
+ * `efs-ap` -> `directoryPerms: 770`, with `gidRangeStart` and `gidRangeEnd` set to sensible values
+ * `efs-dir` -> `directoryPerms: 770` with static `gid`
+  * This ensures that important system utilities will still be able to access the directories that are created
+  * All parameters are available in all modes, but choose them carefully; otherwise you can end up with directories that no one can read or write to. For more information on `directoryPerms` in efs-dir mode, see the [Permission Management Considerations](#permission-management-considerations) section.
* Custom Posix group Id range for Access Point root directory must include both `gidRangeStart` and `gidRangeEnd` parameters. These parameters are optional only if both are omitted. If you specify one, the other becomes mandatory.
* When using a custom Posix group ID range, there is a possibility for the driver to run out of available POSIX group Ids. We suggest ensuring custom group ID range is large enough or create a new storage class with a new file system to provision additional volumes.
* `az` under storage class parameter is not be confused with efs-utils mount option `az`. The `az` mount option is used for cross-az mount or efs one zone file system mount within the same aws account as the cluster.
@@ -164,7 +169,7 @@ You can find previous efs-csi-driver versions' images from [here](https://galler
### Features
* Static provisioning - Amazon EFS file system needs to be created manually first, then it could be mounted inside container as a persistent volume (PV) using the driver.
-* Dynamic provisioning - Uses a persistent volume claim (PVC) to dynamically provision a persistent volume (PV). On Creating a PVC, kuberenetes requests Amazon EFS to create an Access Point in a file system which will be used to mount the PV.
+* Dynamic provisioning - Uses a persistent volume claim (PVC) to dynamically provision a persistent volume (PV). On creating a PVC, Kubernetes requests Amazon EFS to create an Access Point or a directory in the file system, which will be used to mount the PV.
* Mount Options - Mount options can be specified in the persistent volume (PV) or storage class for dynamic provisioning to define how the volume should be mounted.
* Encryption of data in transit - Amazon EFS file systems are mounted with encryption in transit enabled by default in the master branch version of the driver.
* Cross account mount - Amazon EFS file systems from different aws accounts can be mounted from an Amazon EKS cluster.
@@ -173,6 +178,20 @@ You can find previous efs-csi-driver versions' images from [here](https://galler
**Note**
Since Amazon EFS is an elastic file system, it doesn't really enforce any file system capacity. The actual storage capacity value in persistent volume and persistent volume claim is not used when creating the file system. However, since the storage capacity is a required field by Kubernetes, you must specify the value and you can use any valid value for the capacity.
+### Directory Provisioning Mode
+
+The EFS CSI Driver supports directory provisioning as an alternative to access point provisioning. To use directory provisioning, set the `provisioningMode` parameter in your StorageClass to `efs-dir`.
+
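+A minimal `StorageClass` sketch for directory provisioning, assuming you want a static owner for the provisioned directories (the StorageClass name, file system ID and uid/gid values below are placeholders):
+
+```yaml
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: efs-dir-sc
+provisioner: efs.csi.aws.com
+parameters:
+  provisioningMode: efs-dir
+  fileSystemId: fs-92107410         # replace with your file system ID
+  directoryPerms: "770"
+  uid: "1000"                       # illustrative; align with your pods' security context
+  gid: "1000"
+  basePath: "/dynamic_provisioning" # optional
+```
+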
+#### Permission Management Considerations
+
+When using directory provisioning mode, carefully consider how to manage permissions for provisioned directories. The EFS CSI Driver supports setting static uid and gid for newly provisioned directories. This is achieved by specifying `uid` and `gid` parameters in the StorageClass definition. When these parameters are set, the driver will use them to set the ownership of new directories created during volume provisioning.
+
+The `directoryPerms` parameter sets the permissions for newly created directories. In order to ensure volumes are accessible, users must manage pod security contexts to align with the uid/gid and permissions set on the provisioned directories. Be aware that using the same supplemental group grants access to the root directory but may not automatically provide access to newly created subdirectories.
+
+Users can opt to use Kubernetes' fsGroup mechanism by setting the [fsGroupPolicy](https://kubernetes-csi.github.io/docs/support-fsgroup.html) on the CSI driver. **If using fsGroup, ensure all pods accessing the same volume use identical fsGroup values to prevent permission conflicts**. It's recommended to set the fsGroupChangePolicy to "OnRootMismatch" for better performance.
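+
+As a minimal sketch, assuming the StorageClass sets `gid: "1000"`, a pod consuming the volume could declare a matching security context using the standard Kubernetes pod fields:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: efs-app
+spec:
+  securityContext:
+    fsGroup: 1000                         # match the gid set on the StorageClass
+    fsGroupChangePolicy: "OnRootMismatch" # only fix ownership when the volume root doesn't match
+  containers:
+    - name: app
+      image: centos
+      command: ["/bin/sh", "-c", "while true; do echo $(date -u) >> /data/out; sleep 5; done"]
+      volumeMounts:
+        - name: persistent-storage
+          mountPath: /data
+  volumes:
+    - name: persistent-storage
+      persistentVolumeClaim:
+        claimName: efs-claim
+```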
+
+For more information on pod security practices, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) in the Kubernetes documentation.
+
### Installation
**Considerations**
@@ -357,11 +376,12 @@ Enabling the vol-metrics-opt-in parameter activates the gathering of inode and d
### Container Arguments for deployment(controller)
-| Parameters | Values | Default | Optional | Description |
-|-----------------------------|--------|---------|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| delete-access-point-root-dir| | false | true | Opt in to delete access point root directory by DeleteVolume. By default, DeleteVolume will delete the access point behind Persistent Volume and deleting access point will not delete the access point root directory or its contents. |
-| adaptive-retry-mode | | true | true | Opt out to use standard sdk retry mode for EFS API calls. By default, Driver will use adaptive mode for the sdk retry configuration which heavily rate limits EFS API requests to reduce throttling if throttling is observed. |
-| tags | | | true | Space separated key:value pairs which will be added as tags for Amazon EFS resources. For example, '--tags=name:efs-tag-test date:Jan24' |
+| Parameters | Values | Default | Optional | Description |
+|------------------------------|--------|---------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| delete-access-point-root-dir | | false | true | Opt in to delete access point root directory by DeleteVolume. By default, DeleteVolume will delete the access point behind Persistent Volume and deleting access point will not delete the access point root directory or its contents. |
+| delete-provisioned-dir | | false | true | Opt in to delete any provisioned directories and their contents. By default, DeleteVolume will not delete the directory behind Persistent Volume. |
+| adaptive-retry-mode | | true | true | Opt out to use standard sdk retry mode for EFS API calls. By default, Driver will use adaptive mode for the sdk retry configuration which heavily rate limits EFS API requests to reduce throttling if throttling is observed. |
+| tags | | | true | Space separated key:value pairs which will be added as tags for Amazon EFS resources. For example, '--tags=name:efs-tag-test date:Jan24' |
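+
+For example, when deploying with the Helm chart, the flag can be enabled through the controller values; the controller deployment template passes this through as `--delete-provisioned-dir`:
+
+```yaml
+controller:
+  deleteProvisionedDir: true
+```
+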
### Upgrading the Amazon EFS CSI Driver
@@ -413,7 +433,8 @@ Before following the examples, you need to:
#### Example links
* [Static provisioning](../examples/kubernetes/static_provisioning/README.md)
-* [Dynamic provisioning](../examples/kubernetes/dynamic_provisioning/README.md)
+* [Dynamic provisioning with Access Points](../examples/kubernetes/dynamic_provisioning/access_points/README.md)
+* [Dynamic provisioning with Directories](../examples/kubernetes/dynamic_provisioning/directories/README.md)
* [Encryption in transit](../examples/kubernetes/encryption_in_transit/README.md)
* [Accessing the file system from multiple pods](../examples/kubernetes/multiple_pods/README.md)
* [Consume Amazon EFS in StatefulSets](../examples/kubernetes/statefulset/README.md)
diff --git a/examples/kubernetes/dynamic_provisioning/README.md b/examples/kubernetes/dynamic_provisioning/access_points/README.md
similarity index 97%
rename from examples/kubernetes/dynamic_provisioning/README.md
rename to examples/kubernetes/dynamic_provisioning/access_points/README.md
index 888a36191..808835d97 100644
--- a/examples/kubernetes/dynamic_provisioning/README.md
+++ b/examples/kubernetes/dynamic_provisioning/access_points/README.md
@@ -24,7 +24,7 @@ This example requires Kubernetes 1.17 or later and a driver version of 1.2.0 or
2. Download a `StorageClass` manifest for Amazon EFS.
```sh
- curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/specs/storageclass.yaml
+ curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/access_points/specs/storageclass.yaml
```
3. Edit [the file](./specs/storageclass.yaml). Find the following line, and replace the value for `fileSystemId` with your file system ID.
@@ -33,7 +33,6 @@ This example requires Kubernetes 1.17 or later and a driver version of 1.2.0 or
fileSystemId: fs-582a03f3
```
Modify the other values as needed:
- * `provisioningMode` - The type of volume to be provisioned by Amazon EFS. Currently, only access point based provisioning is supported (`efs-ap`).
* `fileSystemId` - The file system under which the access point is created.
* `directoryPerms` - The directory permissions of the root directory created by the access point.
* `gidRangeStart` (Optional) - The starting range of the Posix group ID to be applied onto the root directory of the access point. The default value is `50000`.
@@ -57,7 +56,7 @@ This example requires Kubernetes 1.17 or later and a driver version of 1.2.0 or
1. Download a manifest that deploys a Pod and a PVC.
```sh
- curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/specs/pod.yaml
+ curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/access_points/specs/pod.yaml
```
2. Deploy the Pod with a sample app and the PVC used by the Pod.
diff --git a/examples/kubernetes/dynamic_provisioning/specs/pod.yaml b/examples/kubernetes/dynamic_provisioning/access_points/specs/pod.yaml
similarity index 100%
rename from examples/kubernetes/dynamic_provisioning/specs/pod.yaml
rename to examples/kubernetes/dynamic_provisioning/access_points/specs/pod.yaml
diff --git a/examples/kubernetes/dynamic_provisioning/specs/storageclass.yaml b/examples/kubernetes/dynamic_provisioning/access_points/specs/storageclass.yaml
similarity index 100%
rename from examples/kubernetes/dynamic_provisioning/specs/storageclass.yaml
rename to examples/kubernetes/dynamic_provisioning/access_points/specs/storageclass.yaml
diff --git a/examples/kubernetes/dynamic_provisioning/directories/README.md b/examples/kubernetes/dynamic_provisioning/directories/README.md
new file mode 100644
index 000000000..e6be82186
--- /dev/null
+++ b/examples/kubernetes/dynamic_provisioning/directories/README.md
@@ -0,0 +1,150 @@
+## Dynamic Provisioning
+**Important**
+You can't use dynamic provisioning with Fargate nodes.
+
+This example shows how to dynamically provision a volume backed by a directory on the file system, create a Persistent Volume Claim (PVC), and consume it from a pod.
+
+**Prerequisite**
+This example requires Kubernetes 1.17 or later and a driver version of 1.5.x or later.
+
+1. Create a storage class for Amazon EFS.
+
+ 1. Retrieve your Amazon EFS file system ID. You can find this in the Amazon EFS console, or use the following AWS CLI command.
+
+ ```sh
+ aws efs describe-file-systems --query "FileSystems[*].FileSystemId" --output text
+ ```
+
+ The example output is as follows.
+
+ ```
+ fs-582a03f3
+ ```
+
+ 2. Download a `StorageClass` manifest for Amazon EFS.
+
+ ```sh
+ curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/directories/specs/storageclass.yaml
+ ```
+
+ 3. Edit [the file](./specs/storageclass.yaml). Find the following line, and replace the value for `fileSystemId` with your file system ID.
+
+ ```
+ fileSystemId: fs-582a03f3
+ ```
+
+ Modify the other values as needed:
+    * `fileSystemId` - The file system under which the directory is created.
+    * `directoryPerms` - The permissions of the directory created for the volume.
+    * `basePath` (Optional) - The path on the file system under which the directory is created. If the path isn't provided, directories are created under the root of the file system.
+
+ 4. Deploy the storage class.
+
+ ```sh
+ kubectl apply -f storageclass.yaml
+ ```
+
+2. Test automatic provisioning by deploying a Pod that makes use of the PVC:
+
+ 1. Download a manifest that deploys a Pod and a PVC.
+
+ ```sh
+   curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/directories/specs/pod.yaml
+ ```
+
+ 2. Deploy the Pod with a sample app and the PVC used by the Pod.
+
+ ```sh
+ kubectl apply -f pod.yaml
+ ```
+
+3. Determine the names of the Pods running the controller.
+
+ ```sh
+ kubectl get pods -n kube-system | grep efs-csi-controller
+ ```
+
+ The example output is as follows.
+
+ ```
+ efs-csi-controller-74ccf9f566-q5989 3/3 Running 0 40m
+ efs-csi-controller-74ccf9f566-wswg9 3/3 Running 0 40m
+ ```
+
+4. After a few seconds, you can observe the controller picking up the change \(edited for readability\). Replace `74ccf9f566-q5989` with a value from one of the Pods in your output from the previous command.
+
+ ```sh
+ kubectl logs efs-csi-controller-74ccf9f566-q5989 \
+ -n kube-system \
+ -c csi-provisioner \
+ --tail 10
+ ```
+
+ The example output is as follows.
+
+ ```
+ [...]
+ 1 controller.go:737] successfully created PV pvc-5983ffec-96cf-40c1-9cd6-e5686ca84eca for PVC efs-claim...
+ ```
+
+ If you don't see the previous output, run the previous command using one of the other controller Pods.
+
+5. Confirm that a persistent volume was created with a status of `Bound` to a `PersistentVolumeClaim`:
+
+ ```sh
+ kubectl get pv
+ ```
+
+ The example output is as follows.
+
+ ```
+ NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+ pvc-5983ffec-96cf-40c1-9cd6-e5686ca84eca 20Gi RWX Delete Bound default/efs-claim efs-sc 7m57s
+ ```
+
+6. View details about the `PersistentVolumeClaim` that was created.
+
+ ```sh
+ kubectl get pvc
+ ```
+
+ The example output is as follows.
+
+ ```
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ efs-claim Bound pvc-5983ffec-96cf-40c1-9cd6-e5686ca84eca 20Gi RWX efs-sc 9m7s
+ ```
+
+7. View the sample app Pod's status until the `STATUS` becomes `Running`.
+
+ ```sh
+ kubectl get pods -o wide
+ ```
+
+ The example output is as follows.
+
+ ```
+ NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+ efs-app 1/1 Running 0 10m 192.168.78.156 ip-192-168-73-191.region-code.compute.internal
+ ```
+**Note**
+If a Pod doesn't have an IP address listed, make sure that you added a mount target for the subnet that your node is in \(as described at the end of [Create an Amazon EFS file system](#efs-create-filesystem)\). Otherwise the Pod won't leave `ContainerCreating` status. When an IP address is listed, it may take a few minutes for a Pod to reach the `Running` status.
+
+1. Confirm that the data is written to the volume.
+
+ ```sh
+ kubectl exec efs-app -- bash -c "cat data/out"
+ ```
+
+ The example output is as follows.
+
+ ```
+ [...]
+ Tue Mar 23 14:29:16 UTC 2021
+ Tue Mar 23 14:29:21 UTC 2021
+ Tue Mar 23 14:29:26 UTC 2021
+ Tue Mar 23 14:29:31 UTC 2021
+ [...]
+ ```
+
+2. \(Optional\) Terminate the Amazon EKS node that your Pod is running on and wait for the Pod to be re\-scheduled. Alternately, you can delete the Pod and redeploy it. Complete the previous step again, confirming that the output includes the previous output.
diff --git a/examples/kubernetes/dynamic_provisioning/directories/specs/pod.yaml b/examples/kubernetes/dynamic_provisioning/directories/specs/pod.yaml
new file mode 100644
index 000000000..522a690e1
--- /dev/null
+++ b/examples/kubernetes/dynamic_provisioning/directories/specs/pod.yaml
@@ -0,0 +1,30 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: efs-claim
+spec:
+ accessModes:
+ - ReadWriteMany
+ storageClassName: efs-sc
+ resources:
+ requests:
+ storage: 5Gi
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: efs-app
+spec:
+ containers:
+ - name: app
+ image: centos
+ command: ["/bin/sh"]
+ args: ["-c", "while true; do echo $(date -u) >> /data/out; sleep 5; done"]
+ volumeMounts:
+ - name: persistent-storage
+ mountPath: /data
+ volumes:
+ - name: persistent-storage
+ persistentVolumeClaim:
+ claimName: efs-claim
\ No newline at end of file
diff --git a/examples/kubernetes/dynamic_provisioning/directories/specs/storageclass.yaml b/examples/kubernetes/dynamic_provisioning/directories/specs/storageclass.yaml
new file mode 100644
index 000000000..d9c048c84
--- /dev/null
+++ b/examples/kubernetes/dynamic_provisioning/directories/specs/storageclass.yaml
@@ -0,0 +1,10 @@
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: efs-sc
+provisioner: efs.csi.aws.com
+parameters:
+ provisioningMode: efs-dir
+ fileSystemId: fs-92107410
+ directoryPerms: "700"
+ basePath: "/dynamic_provisioning" # optional
diff --git a/hack/values.yaml b/hack/values.yaml
index f863f750e..246788080 100644
--- a/hack/values.yaml
+++ b/hack/values.yaml
@@ -1,8 +1,8 @@
controller:
create: true
logLevel: 5
+ serviceAccount:
+ create: true
+ deleteProvisionedDir: true
node:
logLevel: 5
-serviceAccount:
- controller:
- create: true
diff --git a/hack/values_eksctl.yaml b/hack/values_eksctl.yaml
index f99eec38d..d18e1b1df 100644
--- a/hack/values_eksctl.yaml
+++ b/hack/values_eksctl.yaml
@@ -2,6 +2,7 @@ controller:
logLevel: 5
serviceAccount:
create: false # let eksctl create it
+ deleteProvisionedDir: true
node:
logLevel: 5
serviceAccount:
diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go
index 4ecbef96b..93b740e13 100644
--- a/pkg/driver/controller.go
+++ b/pkg/driver/controller.go
@@ -20,14 +20,8 @@ import (
"context"
"crypto/sha256"
"fmt"
- "os"
- "path"
"sort"
- "strconv"
"strings"
- "time"
-
- "github.com/google/uuid"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud"
@@ -46,6 +40,7 @@ const (
DefaultTagKey = "efs.csi.aws.com/cluster"
DefaultTagValue = "true"
DirectoryPerms = "directoryPerms"
+ DirectoryMode = "efs-dir"
EnsureUniqueDirectory = "ensureUniqueDirectory"
FsId = "fileSystemId"
Gid = "gid"
@@ -83,32 +78,6 @@ var (
func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
klog.V(4).Infof("CreateVolume: called with args %+v", util.SanitizeRequest(*req))
- var reuseAccessPoint bool
- var err error
- volumeParams := req.GetParameters()
- volName := req.GetName()
- clientToken := volName
-
- // if true, then use sha256 hash of pvcName as clientToken instead of PVC Id
- // This allows users to reconnect to the same AP from different k8s cluster
- if reuseAccessPointStr, ok := volumeParams[ReuseAccessPointKey]; ok {
- reuseAccessPoint, err = strconv.ParseBool(reuseAccessPointStr)
- if err != nil {
- return nil, status.Error(codes.InvalidArgument, "Invalid value for reuseAccessPoint parameter")
- }
- if reuseAccessPoint {
- clientToken = get64LenHash(volumeParams[PvcNameKey])
- klog.V(5).Infof("Client token : %s", clientToken)
- }
- }
- if volName == "" {
- return nil, status.Error(codes.InvalidArgument, "Volume name not provided")
- }
-
- // Volume size is required to match PV to PVC by k8s.
- // Volume size is not consumed by EFS for any purposes.
- volSize := req.GetCapacityRange().GetRequiredBytes()
-
volCaps := req.GetVolumeCapabilities()
if len(volCaps) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities not provided")
@@ -121,425 +90,58 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Volume fstype not supported: %s", err))
}
- var (
- azName string
- basePath string
- gid int64
- gidMin int64
- gidMax int64
- localCloud cloud.Cloud
- provisioningMode string
- roleArn string
- uid int64
- crossAccountDNSEnabled bool
- )
-
- //Parse parameters
+ volumeParams := req.GetParameters()
if value, ok := volumeParams[ProvisioningMode]; ok {
- provisioningMode = value
- //TODO: Add FS provisioning mode check when implemented
- if provisioningMode != AccessPointMode {
- errStr := "Provisioning mode " + provisioningMode + " is not supported. Only Access point provisioning: 'efs-ap' is supported"
- return nil, status.Error(codes.InvalidArgument, errStr)
+ if _, ok = d.provisioners[value]; !ok {
+ return nil, status.Errorf(codes.InvalidArgument, "Provisioning mode %s is not supported.", value)
}
} else {
return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", ProvisioningMode)
}
- accessPointsOptions := &cloud.AccessPointOptions{
- CapacityGiB: volSize,
- }
+ mode := volumeParams[ProvisioningMode]
+ provisioner := d.provisioners[mode]
- if value, ok := volumeParams[FsId]; ok {
- if strings.TrimSpace(value) == "" {
- return nil, status.Errorf(codes.InvalidArgument, "Parameter %v cannot be empty", FsId)
- }
- accessPointsOptions.FileSystemId = value
- } else {
- return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", FsId)
- }
+	klog.V(5).Infof("CreateVolume: provisioning mode %s selected. Supported modes are %s", mode,
+ strings.Join(d.getProvisioningModes(), ","))
- localCloud, roleArn, crossAccountDNSEnabled, err = getCloud(req.GetSecrets(), d)
+ volume, err := provisioner.Provision(ctx, req)
if err != nil {
return nil, err
}
- var accessPoint *cloud.AccessPoint
- //if reuseAccessPoint is true, check for AP with same Root Directory exists in efs
- // if found reuse that AP
- if reuseAccessPoint {
- existingAP, err := localCloud.FindAccessPointByClientToken(ctx, clientToken, accessPointsOptions.FileSystemId)
- if err != nil {
- return nil, fmt.Errorf("failed to find access point: %v", err)
- }
- if existingAP != nil {
- //AP path already exists
- klog.V(2).Infof("Existing AccessPoint found : %+v", existingAP)
- accessPoint = &cloud.AccessPoint{
- AccessPointId: existingAP.AccessPointId,
- FileSystemId: existingAP.FileSystemId,
- CapacityGiB: accessPointsOptions.CapacityGiB,
- }
-
- // Take the lock to prevent this access point from being deleted while creating volume
- if d.lockManager.lockMutex(accessPoint.AccessPointId, ApLockWaitTimeSec*time.Second) {
- defer d.lockManager.unlockMutex(accessPoint.AccessPointId)
- } else {
- return nil, status.Errorf(codes.Internal, "Could not take the lock for existing access point: %v", accessPoint.AccessPointId)
- }
- }
- }
-
- if accessPoint == nil {
- // Create tags
- tags := map[string]string{
- DefaultTagKey: DefaultTagValue,
- }
-
- // Append input tags to default tag
- if len(d.tags) != 0 {
- for k, v := range d.tags {
- tags[k] = v
- }
- }
-
- accessPointsOptions.Tags = tags
-
- uid = -1
- if value, ok := volumeParams[Uid]; ok {
- uid, err = strconv.ParseInt(value, 10, 64)
- if err != nil {
- return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Uid, err)
- }
- if uid < 0 {
- return nil, status.Errorf(codes.InvalidArgument, "%v must be greater or equal than 0", Uid)
- }
- }
-
- gid = -1
- if value, ok := volumeParams[Gid]; ok {
- gid, err = strconv.ParseInt(value, 10, 64)
- if err != nil {
- return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Gid, err)
- }
- if gid < 0 {
- return nil, status.Errorf(codes.InvalidArgument, "%v must be greater or equal than 0", Gid)
- }
- }
-
- if value, ok := volumeParams[GidMin]; ok {
- gidMin, err = strconv.ParseInt(value, 10, 64)
- if err != nil {
- return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", GidMin, err)
- }
- if gidMin <= 0 {
- return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than 0", GidMin)
- }
- }
-
- if value, ok := volumeParams[GidMax]; ok {
- // Ensure GID min is provided with GID max
- if gidMin == 0 {
- return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", GidMin)
- }
- gidMax, err = strconv.ParseInt(value, 10, 64)
- if err != nil {
- return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", GidMax, err)
- }
- if gidMax <= gidMin {
- return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than %v", GidMax, GidMin)
- }
- } else {
- // Ensure GID max is provided with GID min
- if gidMin != 0 {
- return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", GidMax)
- }
- }
-
- // Assign default GID ranges if not provided
- if gidMin == 0 && gidMax == 0 {
- gidMin = DefaultGidMin
- gidMax = DefaultGidMax
- }
-
- if value, ok := volumeParams[DirectoryPerms]; ok {
- accessPointsOptions.DirectoryPerms = value
- }
-
- // Storage class parameter `az` will be used to fetch preferred mount target for cross account mount.
- // If the `az` storage class parameter is not provided, a random mount target will be picked for mounting.
- // This storage class parameter different from `az` mount option provided by efs-utils https://github.com/aws/efs-utils/blob/v1.31.1/src/mount_efs/__init__.py#L195
- // The `az` mount option provided by efs-utils is used for cross az mount or to provide az of efs one zone file system mount within the same aws-account.
- // To make use of the `az` mount option, add it under storage class's `mountOptions` section. https://kubernetes.io/docs/concepts/storage/storage-classes/#mount-options
- if value, ok := volumeParams[AzName]; ok {
- azName = value
- }
-
- // Check if file system exists. Describe FS or List APs handle appropriate error codes
- // With dynamic uid/gid provisioning we can save a call to describe FS, as list APs fails if FS ID does not exist
- var accessPoints []*cloud.AccessPoint
- if uid == -1 || gid == -1 {
- accessPoints, err = localCloud.ListAccessPoints(ctx, accessPointsOptions.FileSystemId)
- } else {
- _, err = localCloud.DescribeFileSystem(ctx, accessPointsOptions.FileSystemId)
- }
- if err != nil {
- if err == cloud.ErrAccessDenied {
- return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
- }
- if err == cloud.ErrNotFound {
- return nil, status.Errorf(codes.InvalidArgument, "File System does not exist: %v", err)
- }
- return nil, status.Errorf(codes.Internal, "Failed to fetch Access Points or Describe File System: %v", err)
- }
-
- var allocatedGid int64
- if uid == -1 || gid == -1 {
- allocatedGid, err = d.gidAllocator.getNextGid(accessPointsOptions.FileSystemId, accessPoints, gidMin, gidMax)
- if err != nil {
- return nil, err
- }
- }
- if uid == -1 {
- uid = allocatedGid
- }
- if gid == -1 {
- gid = allocatedGid
- }
-
- if value, ok := volumeParams[BasePath]; ok {
- basePath = value
- }
-
- rootDirName := volName
- // Check if a custom structure should be imposed on the access point directory
- if value, ok := volumeParams[SubPathPattern]; ok {
- // Try and construct the root directory and check it only contains supported components
- val, err := interpolateRootDirectoryName(value, volumeParams)
- if err == nil {
- klog.Infof("Using user-specified structure for access point directory.")
- rootDirName = val
- if value, ok := volumeParams[EnsureUniqueDirectory]; ok {
- if ensureUniqueDirectory, err := strconv.ParseBool(value); !ensureUniqueDirectory && err == nil {
- klog.Infof("Not appending PVC UID to path.")
- } else {
- klog.Infof("Appending PVC UID to path.")
- rootDirName = fmt.Sprintf("%s-%s", val, uuid.New().String())
- }
- } else {
- klog.Infof("Appending PVC UID to path.")
- rootDirName = fmt.Sprintf("%s-%s", val, uuid.New().String())
- }
- } else {
- return nil, err
- }
- } else {
- klog.Infof("Using PV name for access point directory.")
- }
-
- rootDir := path.Join("/", basePath, rootDirName)
- if ok, err := validateEfsPathRequirements(rootDir); !ok {
- return nil, err
- }
- klog.Infof("Using %v as the access point directory.", rootDir)
-
- accessPointsOptions.Uid = uid
- accessPointsOptions.Gid = gid
- accessPointsOptions.DirectoryPath = rootDir
-
- accessPoint, err = localCloud.CreateAccessPoint(ctx, clientToken, accessPointsOptions)
- if err != nil {
- if err == cloud.ErrAccessDenied {
- return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
- }
- if err == cloud.ErrAlreadyExists {
- return nil, status.Errorf(codes.AlreadyExists, "Access Point already exists")
- }
- return nil, status.Errorf(codes.Internal, "Failed to create Access point in File System %v : %v", accessPointsOptions.FileSystemId, err)
- }
-
- // Lock on the new access point to prevent accidental deletion before creation is done
- if d.lockManager.lockMutex(accessPoint.AccessPointId, ApLockWaitTimeSec*time.Second) {
- defer d.lockManager.unlockMutex(accessPoint.AccessPointId)
- } else {
- return nil, status.Errorf(codes.Internal, "Could not take the lock after creating access point: %v", accessPoint.AccessPointId)
- }
- }
-
- volContext := map[string]string{}
-
- // Enable cross-account dns resolution or fetch mount target Ip for cross-account mount
- if roleArn != "" {
- if crossAccountDNSEnabled {
- // This option indicates the customer would like to use DNS to resolve
- // the cross-account mount target ip address (in order to mount to
- // the same AZ-ID as the client instance); mounttargetip should
- // not be used as a mount option in this case.
- volContext[CrossAccount] = strconv.FormatBool(true)
- } else {
- mountTarget, err := localCloud.DescribeMountTargets(ctx, accessPointsOptions.FileSystemId, azName)
- if err != nil {
- klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", accessPointsOptions.FileSystemId, err)
- } else {
- volContext[MountTargetIp] = mountTarget.IPAddress
- }
-
- }
- }
-
return &csi.CreateVolumeResponse{
- Volume: &csi.Volume{
- CapacityBytes: volSize,
- VolumeId: accessPointsOptions.FileSystemId + "::" + accessPoint.AccessPointId,
- VolumeContext: volContext,
- },
+ Volume: volume,
}, nil
}
func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
- var (
- localCloud cloud.Cloud
- roleArn string
- crossAccountDNSEnabled bool
- err error
- )
-
- localCloud, roleArn, crossAccountDNSEnabled, err = getCloud(req.GetSecrets(), d)
- if err != nil {
- return nil, err
- }
-
- klog.V(4).Infof("DeleteVolume: called with args %+v", util.SanitizeRequest(*req))
+	klog.V(4).Infof("DeleteVolume: called with args %+v", util.SanitizeRequest(*req))
volId := req.GetVolumeId()
if volId == "" {
return nil, status.Error(codes.InvalidArgument, "Volume ID not provided")
}
- fileSystemId, _, accessPointId, err := parseVolumeId(volId)
+ _, subpath, accessPointId, err := parseVolumeId(volId)
if err != nil {
//Returning success for an invalid volume ID. See here - https://github.com/kubernetes-csi/csi-test/blame/5deb83d58fea909b2895731d43e32400380aae3c/pkg/sanity/controller.go#L733
klog.V(5).Infof("DeleteVolume: Failed to parse volumeID: %v, err: %v, returning success", volId, err)
return &csi.DeleteVolumeResponse{}, nil
}
- if accessPointId == "" {
- klog.V(5).Infof("DeleteVolume: No Access Point for volume %v, returning success", volId)
- return &csi.DeleteVolumeResponse{}, nil
- }
-
- // Lock on the access point ID to ensure a retry won't race with the in-progress deletion
- if d.lockManager.lockMutex(accessPointId, ApLockWaitTimeSec*time.Second) {
- defer d.lockManager.unlockMutex(accessPointId)
+ var provisioningMode string
+ if accessPointId != "" {
+ provisioningMode = AccessPointMode
+ } else if subpath != "" {
+ provisioningMode = DirectoryMode
} else {
- return nil, status.Errorf(codes.Internal, "Could not take the lock to delete access point: %v", accessPointId)
- }
-
- //TODO: Add Delete File System when FS provisioning is implemented
- // Delete access point root directory if delete-access-point-root-dir is set.
- if d.deleteAccessPointRootDir {
- fsRoot := TempMountPathPrefix + "/" + accessPointId
- deleteCompleted := false
-
- // Ensure the volume is cleaned up properly in case of an incomplete deletion
- defer func() {
- if !deleteCompleted {
- // Check if the FS is still mounted
- isNotMounted, err := d.mounter.IsLikelyNotMountPoint(fsRoot)
- if err != nil {
- return // Skip cleanup, we can't verify mount status
- }
-
- if !isNotMounted {
- if err := d.mounter.Unmount(fsRoot); err != nil {
- klog.Warningf("Failed to unmount %v: %v", fsRoot, err)
- return // Don't remove any data if the unmount fails
- }
- }
-
- // Only try folder removal if the unmount succeeded or wasn't mounted
- // If the directory already doesn't exist it will be treated as success
- if err := os.Remove(fsRoot); err != nil && !os.IsNotExist(err) {
- klog.Warningf("Failed to remove %v: %v", fsRoot, err)
- }
- }
- }()
-
- // Check if Access point exists.
- // If access point exists, retrieve its root directory and delete it/
- accessPoint, err := localCloud.DescribeAccessPoint(ctx, accessPointId)
- if err != nil {
- if err == cloud.ErrAccessDenied {
- return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
- }
- if err == cloud.ErrNotFound {
- klog.V(5).Infof("DeleteVolume: Access Point %v not found, returning success", accessPointId)
- return &csi.DeleteVolumeResponse{}, nil
- }
- return nil, status.Errorf(codes.Internal, "Could not get describe Access Point: %v , error: %v", accessPointId, err)
- }
-
- //Mount File System at it root and delete access point root directory
- mountOptions := []string{"tls", "iam"}
- if roleArn != "" {
- if crossAccountDNSEnabled {
- // Connect via dns rather than mounttargetip
- mountOptions = append(mountOptions, CrossAccount)
- } else {
- mountTarget, err := localCloud.DescribeMountTargets(ctx, fileSystemId, "")
- if err == nil {
- mountOptions = append(mountOptions, MountTargetIp+"="+mountTarget.IPAddress)
- } else {
- klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", fileSystemId, err)
- }
- }
- }
-
- // Create the target directory, This won't fail if it already exists
- if err := d.mounter.MakeDir(fsRoot); err != nil {
- return nil, status.Errorf(codes.Internal, "Could not create dir %q: %v", fsRoot, err)
- }
-
- // Only attempt to mount the target filesystem if its not already mounted
- isNotMounted, err := d.mounter.IsLikelyNotMountPoint(fsRoot)
- if err != nil {
- return nil, status.Errorf(codes.Internal, "Could not check if %q is mounted: %v", fsRoot, err)
- }
- if isNotMounted {
- if err := d.mounter.Mount(fileSystemId, fsRoot, "efs", mountOptions); err != nil {
- return nil, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", fileSystemId, fsRoot, err)
- }
- }
-
- // Before removing, ensure the removal path exists and is a directory
- apRootPath := fsRoot + accessPoint.AccessPointRootDir
- if pathInfo, err := d.mounter.Stat(apRootPath); err == nil && !os.IsNotExist(err) && pathInfo.IsDir() {
- err = os.RemoveAll(apRootPath)
- }
- if err != nil {
- return nil, status.Errorf(codes.Internal, "Could not delete access point root directory %q: %v", accessPoint.AccessPointRootDir, err)
- }
- err = d.mounter.Unmount(fsRoot)
- if err != nil {
- return nil, status.Errorf(codes.Internal, "Could not unmount %q: %v", fsRoot, err)
- }
- err = os.Remove(fsRoot)
- if err != nil && !os.IsNotExist(err) {
- return nil, status.Errorf(codes.Internal, "Could not delete %q: %v", fsRoot, err)
- }
-
- //Mark the delete as complete, Nothing needs cleanup in the deferred function
- deleteCompleted = true
+ klog.V(5).Infof("DeleteVolume: No Access Point or subpath for volume %v, returning success", volId)
+ return &csi.DeleteVolumeResponse{}, nil
}
- // Delete access point
- if err = localCloud.DeleteAccessPoint(ctx, accessPointId); err != nil {
- if err == cloud.ErrAccessDenied {
- return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
- }
- if err == cloud.ErrNotFound {
- klog.V(5).Infof("DeleteVolume: Access Point not found, returning success")
- return &csi.DeleteVolumeResponse{}, nil
- }
+ klog.V(5).Infof("DeleteVolume: provisioningMode %v", provisioningMode)
+ err = d.provisioners[provisioningMode].Delete(ctx, req)
+ if err != nil {
return nil, status.Errorf(codes.Internal, "Failed to Delete volume %v: %v", volId, err)
}
@@ -625,39 +227,6 @@ func (d *Driver) ControllerGetVolume(ctx context.Context, req *csi.ControllerGet
return nil, status.Error(codes.Unimplemented, "")
}
-func getCloud(secrets map[string]string, driver *Driver) (cloud.Cloud, string, bool, error) {
-
- var localCloud cloud.Cloud
- var roleArn string
- var crossAccountDNSEnabled bool
- var err error
-
- // Fetch aws role ARN for cross account mount from CSI secrets. Link to CSI secrets below
- // https://kubernetes-csi.github.io/docs/secrets-and-credentials.html#csi-operation-secrets
- if value, ok := secrets[RoleArn]; ok {
- roleArn = value
- }
- if value, ok := secrets[CrossAccount]; ok {
- crossAccountDNSEnabled, err = strconv.ParseBool(value)
- if err != nil {
- return nil, "", false, status.Error(codes.InvalidArgument, "crossaccount parameter must have boolean value.")
- }
- } else {
- crossAccountDNSEnabled = false
- }
-
- if roleArn != "" {
- localCloud, err = cloud.NewCloudWithRole(roleArn, driver.adaptiveRetryMode)
- if err != nil {
- return nil, "", false, status.Errorf(codes.Unauthenticated, "Unable to initialize aws cloud: %v. Please verify role has the correct AWS permissions for cross account mount", err)
- }
- } else {
- localCloud = driver.cloud
- }
-
- return localCloud, roleArn, crossAccountDNSEnabled, nil
-}
-
func interpolateRootDirectoryName(rootDirectoryPath string, volumeParams map[string]string) (string, error) {
r := strings.NewReplacer(createListOfVariableSubstitutions(volumeParams)...)
result := r.Replace(rootDirectoryPath)
diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go
index 1629f7dd6..fc8266759 100644
--- a/pkg/driver/controller_test.go
+++ b/pkg/driver/controller_test.go
@@ -25,13 +25,14 @@ import (
func TestCreateVolume(t *testing.T) {
var (
- endpoint = "endpoint"
- volumeName = "volumeName"
- fsId = "fs-abcd1234"
- apId = "fsap-abcd1234xyz987"
- volumeId = "fs-abcd1234::fsap-abcd1234xyz987"
- capacityRange int64 = 5368709120
- stdVolCap = &csi.VolumeCapability{
+ endpoint = "endpoint"
+ volumeName = "volumeName"
+ fsId = "fs-abcd1234"
+ apId = "fsap-abcd1234xyz987"
+ volumeId = "fs-abcd1234::fsap-abcd1234xyz987"
+ dirProvisioningVolumeId = "fs-abcd1234:/dynamic/" + volumeName
+ capacityRange int64 = 5368709120
+ stdVolCap = &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
@@ -50,12 +51,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -116,12 +112,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -184,12 +175,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -267,12 +253,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -397,12 +378,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -488,12 +464,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -543,18 +514,12 @@ func TestCreateVolume(t *testing.T) {
},
},
{
- name: "Success: Normal flow",
+ name: "Success: Normal flow, Access Point Provisioning",
testFunc: func(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -611,13 +576,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -665,7 +624,8 @@ func TestCreateVolume(t *testing.T) {
// Lock the volume mutex to hold threads until they are all scheduled
for _, ap := range accessPointArr {
- driver.lockManager.lockMutex(ap.AccessPointId)
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ provisioner.lockManager.lockMutex(ap.AccessPointId)
}
var wg sync.WaitGroup
@@ -690,7 +650,8 @@ func TestCreateVolume(t *testing.T) {
// Unlock the mutex to force a race
for _, ap := range accessPointArr {
- driver.lockManager.unlockMutex(ap.AccessPointId)
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ provisioner.lockManager.unlockMutex(ap.AccessPointId)
}
go func() {
@@ -721,25 +682,66 @@ func TestCreateVolume(t *testing.T) {
}
// Ensure all keys were properly deleted from the lock manager
- keys, _ := driver.lockManager.GetLockCount()
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ keys, _ := provisioner.lockManager.GetLockCount()
if keys > 0 {
t.Fatalf("%d Keys are still in the lockManager", keys)
}
mockCtl.Finish()
},
},
+ {
+ name: "Success: Normal flow, Directory Provisioning",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
+
+ driver := buildDriver(endpoint, nil, "", mockMounter, false, false)
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: "efs-dir",
+ FsId: fsId,
+ DirectoryPerms: "777",
+ BasePath: "/dynamic",
+ },
+ }
+
+ ctx := context.Background()
+
+ res, err := driver.CreateVolume(ctx, req)
+
+ if err != nil {
+ t.Fatalf("CreateVolume failed: %v", err)
+ }
+
+ if res.Volume == nil {
+ t.Fatal("Volume is nil")
+ }
+
+ if res.Volume.VolumeId != dirProvisioningVolumeId {
+ t.Fatalf("Volume Id mismatched. Expected: %v, Actual: %v", dirProvisioningVolumeId, res.Volume.VolumeId)
+ }
+ mockCtl.Finish()
+ },
+ },
{
name: "Success: Using Default GID ranges",
testFunc: func(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -791,13 +793,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr("cluster:efs"),
- }
+ driver := buildDriver(endpoint, mockCloud, "cluster:efs", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -851,13 +847,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr("cluster-efs"),
- }
+			driver := buildDriver(endpoint, mockCloud, "cluster-efs", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -911,13 +901,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvcNameVal := "test-pvc"
req := &csi.CreateVolumeRequest{
@@ -975,13 +959,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvcNameVal := "test-pvc"
req := &csi.CreateVolumeRequest{
@@ -1044,13 +1022,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvcNameVal := "test-pvc"
req := &csi.CreateVolumeRequest{
@@ -1086,7 +1058,8 @@ func TestCreateVolume(t *testing.T) {
mockCloud.EXPECT().FindAccessPointByClientToken(gomock.Eq(ctx), gomock.Any(), gomock.Eq(fsId)).Return(accessPoint, nil).Times(numGoRoutines)
// Lock the volume mutex to hold threads until they are all scheduled
- driver.lockManager.lockMutex(apId)
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ provisioner.lockManager.lockMutex(apId)
var wg sync.WaitGroup
resultChan := make(chan struct {
@@ -1109,7 +1082,7 @@ func TestCreateVolume(t *testing.T) {
}
// Unlock the mutex to force a race
- driver.lockManager.unlockMutex(apId)
+ provisioner.lockManager.unlockMutex(apId)
go func() {
wg.Wait()
@@ -1131,7 +1104,7 @@ func TestCreateVolume(t *testing.T) {
}
// Ensure all keys were properly deleted from the lock manager
- keys, _ := driver.lockManager.GetLockCount()
+ keys, _ := provisioner.lockManager.GetLockCount()
if keys > 0 {
t.Fatalf("%d Keys are still in the lockManager", keys)
}
@@ -1146,13 +1119,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvcNameVal := "test-pvc"
req := &csi.CreateVolumeRequest{
@@ -1188,7 +1155,9 @@ func TestCreateVolume(t *testing.T) {
mockCloud.EXPECT().FindAccessPointByClientToken(gomock.Eq(ctx), gomock.Any(), gomock.Eq(fsId)).Return(accessPoint, nil).Times(1)
// Lock the volume mutex to hold the thread and force a timeout
- driver.lockManager.lockMutex(apId)
+
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ provisioner.lockManager.lockMutex(apId)
start := time.Now()
resp, err := driver.CreateVolume(ctx, req)
@@ -1213,10 +1182,10 @@ func TestCreateVolume(t *testing.T) {
t.Fatal("Response should have been nil")
}
- driver.lockManager.unlockMutex(apId)
+ provisioner.lockManager.unlockMutex(apId)
// Ensure all keys were properly deleted from the lock manager even with a timeout
- keys, _ := driver.lockManager.GetLockCount()
+ keys, _ := provisioner.lockManager.GetLockCount()
if keys > 0 {
t.Fatalf("%d Keys are still in the lockManager", keys)
}
@@ -1230,13 +1199,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvName := "foo"
pvcName := "bar"
@@ -1301,13 +1264,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvcName := "foo"
directoryCreated := fmt.Sprintf("/%s", pvcName)
@@ -1370,13 +1327,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvcName := "foo"
basePath := "bash"
@@ -1442,13 +1393,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvcName := "foo"
basePath := "bash"
@@ -1515,13 +1460,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvcName := "foo"
directoryCreated := fmt.Sprintf("/%s", pvcName)
@@ -1585,13 +1524,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -1651,13 +1584,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -1718,13 +1645,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
pvcName := "foo"
directoryCreated := fmt.Sprintf("/%s/%s", pvcName, pvcName)
@@ -1787,11 +1708,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Parameters: map[string]string{
@@ -1815,11 +1732,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -1844,11 +1757,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -1876,11 +1785,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -1918,12 +1823,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -1966,11 +1866,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2008,11 +1904,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2043,11 +1935,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2077,11 +1965,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2111,11 +1995,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2146,11 +2026,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2182,11 +2058,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2218,11 +2090,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2254,11 +2122,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2290,11 +2154,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2326,11 +2186,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2362,11 +2218,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2399,11 +2251,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2436,11 +2284,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2472,11 +2316,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2508,11 +2348,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2548,11 +2384,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2586,11 +2418,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2626,11 +2454,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2664,11 +2488,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2704,11 +2524,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2742,11 +2558,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2781,11 +2593,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2820,11 +2628,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2879,12 +2683,7 @@ func TestCreateVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
secrets := map[string]string{}
secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole"
@@ -2928,11 +2727,7 @@ func TestCreateVolume(t *testing.T) {
subPathPattern := "${.PVC.name}/${foo}"
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -2972,11 +2767,7 @@ func TestCreateVolume(t *testing.T) {
subPathPattern := "this-directory-name-is-far-too-long-for-any-practical-purposes-and-only-serves-to-prove-a-point"
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -3016,11 +2807,7 @@ func TestCreateVolume(t *testing.T) {
subPathPattern := "a/b/c/d/e/f"
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.CreateVolumeRequest{
Name: volumeName,
@@ -3061,12 +2848,13 @@ func TestCreateVolume(t *testing.T) {
func TestDeleteVolume(t *testing.T) {
var (
- apId = "fsap-abcd1234xyz987"
- apId2 = "fsap-abcd1234xyz988"
- fsId = "fs-abcd1234"
- endpoint = "endpoint"
- volumeId = "fs-abcd1234::fsap-abcd1234xyz987"
- volumeId2 = "fs-abcd1234::fsap-abcd1234xyz988"
+ apId = "fsap-abcd1234xyz987"
+ apId2 = "fsap-abcd1234xyz988"
+ fsId = "fs-abcd1234"
+ endpoint = "endpoint"
+ volumeId = "fs-abcd1234::fsap-abcd1234xyz987"
+ volumeId2 = "fs-abcd1234::fsap-abcd1234xyz988"
+ dirProvisionedVolumeId = "fs-abcd1234:/dynamic/newVolume"
)
testCases := []struct {
@@ -3074,17 +2862,12 @@ func TestDeleteVolume(t *testing.T) {
testFunc func(t *testing.T)
}{
{
- name: "Success: Normal flow",
+ name: "Success: Normal flow, Access Point Provisioning",
testFunc: func(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3099,6 +2882,26 @@ func TestDeleteVolume(t *testing.T) {
mockCtl.Finish()
},
},
+ {
+ name: "Success: Normal flow, Directory Provisioning",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: dirProvisionedVolumeId,
+ }
+
+ ctx := context.Background()
+ _, err := driver.DeleteVolume(ctx, req)
+ if err != nil {
+ t.Fatalf("Delete Volume failed: %v", err)
+ }
+ mockCtl.Finish()
+ },
+ },
{
name: "Success: Normal flow with deleteAccessPointRootDir",
testFunc: func(t *testing.T) {
@@ -3106,14 +2909,7 @@ func TestDeleteVolume(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3158,14 +2954,7 @@ func TestDeleteVolume(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3209,7 +2998,8 @@ func TestDeleteVolume(t *testing.T) {
}).Times(numGoRoutines)
// Lock the volume mutex to hold threads until they are all scheduled
- driver.lockManager.lockMutex(apId)
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ provisioner.lockManager.lockMutex(apId)
var wg sync.WaitGroup
resultChan := make(chan struct {
@@ -3232,7 +3022,7 @@ func TestDeleteVolume(t *testing.T) {
}
// Unlock the mutex to force a race
- driver.lockManager.unlockMutex(apId)
+ provisioner.lockManager.unlockMutex(apId)
go func() {
wg.Wait()
@@ -3246,7 +3036,7 @@ func TestDeleteVolume(t *testing.T) {
}
// Ensure all keys were properly deleted from the lock manager
- keys, _ := driver.lockManager.GetLockCount()
+ keys, _ := provisioner.lockManager.GetLockCount()
if keys > 0 {
t.Fatalf("%d Keys are still in the lockManager", keys)
}
@@ -3262,14 +3052,7 @@ func TestDeleteVolume(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3335,8 +3118,9 @@ func TestDeleteVolume(t *testing.T) {
}).Times(numGoRoutines)
// Lock the volume mutex to hold threads until they are all scheduled
- driver.lockManager.lockMutex(apId)
- driver.lockManager.lockMutex(apId2)
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ provisioner.lockManager.lockMutex(apId)
+ provisioner.lockManager.lockMutex(apId2)
var wg sync.WaitGroup
resultChan := make(chan struct {
@@ -3374,8 +3158,8 @@ func TestDeleteVolume(t *testing.T) {
}
// Unlock the mutex to force a race
- driver.lockManager.unlockMutex(apId)
- driver.lockManager.unlockMutex(apId2)
+ provisioner.lockManager.unlockMutex(apId)
+ provisioner.lockManager.unlockMutex(apId2)
go func() {
wg.Wait()
@@ -3389,7 +3173,7 @@ func TestDeleteVolume(t *testing.T) {
}
// Ensure all keys were properly deleted from the lock manager
- keys, _ := driver.lockManager.GetLockCount()
+ keys, _ := provisioner.lockManager.GetLockCount()
if keys > 0 {
t.Fatalf("%d Keys are still in the lockManager", keys)
}
@@ -3405,14 +3189,7 @@ func TestDeleteVolume(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3421,7 +3198,8 @@ func TestDeleteVolume(t *testing.T) {
ctx := context.Background()
// Lock the volume mutex to hold the thread and force it to time out
- driver.lockManager.lockMutex(apId)
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ provisioner.lockManager.lockMutex(apId)
start := time.Now()
resp, err := driver.DeleteVolume(ctx, req)
@@ -3446,10 +3224,10 @@ func TestDeleteVolume(t *testing.T) {
t.Errorf("Expected err to not be nil, but got %v", err)
}
- driver.lockManager.unlockMutex(apId)
+ provisioner.lockManager.unlockMutex(apId)
// Ensure all keys were properly deleted from the lock manager even with a timeout
- keys, _ := driver.lockManager.GetLockCount()
+ keys, _ := provisioner.lockManager.GetLockCount()
if keys > 0 {
t.Fatalf("%d Keys are still in the lockManager", keys)
}
@@ -3458,21 +3236,38 @@ func TestDeleteVolume(t *testing.T) {
},
},
{
- name: "Success: DescribeAccessPoint Access Point Does not exist",
+ name: "Success: Normal flow, Directory Provisioning with deleteProvisionedDir",
testFunc: func(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, false, true)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: dirProvisionedVolumeId,
}
+ ctx := context.Background()
+ _, err := driver.DeleteVolume(ctx, req)
+ if err != nil {
+ t.Fatalf("Delete Volume failed: %v", err)
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Success: DescribeAccessPoint Access Point Does not exist",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
+
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
}
@@ -3494,14 +3289,7 @@ func TestDeleteVolume(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3524,14 +3312,7 @@ func TestDeleteVolume(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3554,14 +3335,7 @@ func TestDeleteVolume(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3592,14 +3366,7 @@ func TestDeleteVolume(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3631,14 +3398,7 @@ func TestDeleteVolume(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3680,12 +3440,7 @@ func TestDeleteVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3700,18 +3455,29 @@ func TestDeleteVolume(t *testing.T) {
mockCtl.Finish()
},
},
+ {
+ name: "Success: Cannot parse details from volumeId",
+ testFunc: func(t *testing.T) {
+ driver := buildDriver(endpoint, nil, "", nil, false, true)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: "foobarbash",
+ }
+
+ ctx := context.Background()
+ _, err := driver.DeleteVolume(ctx, req)
+ if err != nil {
+ t.Fatalf("Expected success but found: %v", err)
+ }
+ },
+ },
{
name: "Fail: DeleteAccessPoint access denied",
testFunc: func(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3732,12 +3498,7 @@ func TestDeleteVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.DeleteVolumeRequest{
VolumeId: volumeId,
@@ -3758,12 +3519,7 @@ func TestDeleteVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.DeleteVolumeRequest{
VolumeId: "fs-abcd1234",
@@ -3783,13 +3539,7 @@ func TestDeleteVolume(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
secrets := map[string]string{}
secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole"
@@ -3846,15 +3596,7 @@ func TestCreateDeleteVolumeRace(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
pvcNameVal := "test-pvc"
createReq := &csi.CreateVolumeRequest{
@@ -3917,7 +3659,8 @@ func TestCreateDeleteVolumeRace(t *testing.T) {
mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil).Times(1)
// Lock the volume mutex to hold threads until they are all scheduled
- driver.lockManager.lockMutex(apId)
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ provisioner.lockManager.lockMutex(apId)
var wg sync.WaitGroup
deleteResultChan := make(chan struct {
@@ -3958,7 +3701,7 @@ func TestCreateDeleteVolumeRace(t *testing.T) {
// Let the threads settle to force the race
time.Sleep(100 * time.Millisecond)
- driver.lockManager.unlockMutex(apId)
+ provisioner.lockManager.unlockMutex(apId)
go func() {
wg.Wait()
@@ -3985,7 +3728,7 @@ func TestCreateDeleteVolumeRace(t *testing.T) {
}
// Ensure all keys were properly deleted from the lock manager
- keys, _ := driver.lockManager.GetLockCount()
+ keys, _ := provisioner.lockManager.GetLockCount()
if keys > 0 {
t.Fatalf("%d Keys are still in the lockManager", keys)
}
@@ -3999,15 +3742,7 @@ func TestCreateDeleteVolumeRace(t *testing.T) {
mockCloud := mocks.NewMockCloud(mockCtl)
mockMounter := mocks.NewMockMounter(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- mounter: mockMounter,
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
- tags: parseTagsFromStr(""),
- deleteAccessPointRootDir: true,
- }
+ driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false)
pvcNameVal := "test-pvc"
createReq := &csi.CreateVolumeRequest{
@@ -4069,7 +3804,8 @@ func TestCreateDeleteVolumeRace(t *testing.T) {
mockCloud.EXPECT().DeleteAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(nil).Times(1)
// Lock the volume mutex to hold threads until they are all scheduled
- driver.lockManager.lockMutex(apId)
+ provisioner, _ := driver.provisioners["efs-ap"].(*AccessPointProvisioner)
+ provisioner.lockManager.lockMutex(apId)
var wg sync.WaitGroup
createResultChan := make(chan struct {
@@ -4110,7 +3846,7 @@ func TestCreateDeleteVolumeRace(t *testing.T) {
// Let the threads settle to force the race
time.Sleep(100 * time.Millisecond)
- driver.lockManager.unlockMutex(apId)
+ provisioner.lockManager.unlockMutex(apId)
go func() {
wg.Wait()
@@ -4137,7 +3873,7 @@ func TestCreateDeleteVolumeRace(t *testing.T) {
}
// Ensure all keys were properly deleted from the lock manager
- keys, _ := driver.lockManager.GetLockCount()
+ keys, _ := provisioner.lockManager.GetLockCount()
if keys > 0 {
t.Fatalf("%d Keys are still in the lockManager", keys)
}
@@ -4182,10 +3918,7 @@ func TestValidateVolumeCapabilities(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.ValidateVolumeCapabilitiesRequest{
VolumeId: volumeId,
@@ -4212,10 +3945,7 @@ func TestValidateVolumeCapabilities(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.ValidateVolumeCapabilitiesRequest{
VolumeId: volumeId,
@@ -4242,10 +3972,7 @@ func TestValidateVolumeCapabilities(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.ValidateVolumeCapabilitiesRequest{
VolumeCapabilities: []*csi.VolumeCapability{
@@ -4267,10 +3994,7 @@ func TestValidateVolumeCapabilities(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
req := &csi.ValidateVolumeCapabilitiesRequest{
VolumeId: volumeId,
@@ -4284,6 +4008,20 @@ func TestValidateVolumeCapabilities(t *testing.T) {
mockCtl.Finish()
},
},
+ {
+ name: "Fail: volumeId not provided",
+ testFunc: func(t *testing.T) {
+ driver := buildDriver(endpoint, nil, "", nil, false, true)
+
+ req := &csi.DeleteVolumeRequest{}
+
+ ctx := context.Background()
+ _, err := driver.DeleteVolume(ctx, req)
+ if err == nil {
+ t.Fatal("Expected failure but operation succeeded")
+ }
+ },
+ },
}
for _, tc := range testCases {
@@ -4296,10 +4034,7 @@ func TestControllerGetCapabilities(t *testing.T) {
mockCtl := gomock.NewController(t)
mockCloud := mocks.NewMockCloud(mockCtl)
- driver := &Driver{
- endpoint: endpoint,
- cloud: mockCloud,
- }
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
ctx := context.Background()
_, err := driver.ControllerGetCapabilities(ctx, &csi.ControllerGetCapabilitiesRequest{})
@@ -4325,3 +4060,12 @@ func randStringBytes(n int) string {
}
return string(b)
}
+
+func buildDriver(endpoint string, cloud cloud.Cloud, tags string, mounter Mounter, deleteAccessPointRootDir bool, deleteProvisionedDir bool) *Driver {
+ return &Driver{
+ endpoint: endpoint,
+ cloud: cloud,
+ mounter: mounter,
+ provisioners: getProvisioners(cloud, mounter, parseTagsFromStr(tags), deleteAccessPointRootDir, false, &FakeOsClient{}, deleteProvisionedDir),
+ }
+}
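
Reviewer note: a minimal sketch (not part of this patch) of how the new buildDriver helper could be exercised to check the wiring done by getProvisioners. The test name and the expectation of exactly two registered modes are assumptions based on getProvisioners as written in this change.

// Hypothetical sketch only: verifies buildDriver registers one provisioner per supported mode.
func TestBuildDriverRegistersProvisioners(t *testing.T) {
	driver := buildDriver("endpoint", nil, "", nil, false, false)

	modes := driver.getProvisioningModes()
	if len(modes) != 2 {
		t.Fatalf("expected 2 provisioning modes, got %d: %v", len(modes), modes)
	}
	if _, ok := driver.provisioners[AccessPointMode].(*AccessPointProvisioner); !ok {
		t.Fatalf("expected an *AccessPointProvisioner under %q", AccessPointMode)
	}
	if _, ok := driver.provisioners[DirectoryMode].(*DirectoryProvisioner); !ok {
		t.Fatalf("expected a *DirectoryProvisioner under %q", DirectoryMode)
	}
}
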
diff --git a/pkg/driver/driver.go b/pkg/driver/driver.go
index f8d78dac6..f4fe967d8 100644
--- a/pkg/driver/driver.go
+++ b/pkg/driver/driver.go
@@ -38,25 +38,21 @@ const (
)
type Driver struct {
- endpoint string
- nodeID string
- srv *grpc.Server
- mounter Mounter
- efsWatchdog Watchdog
- cloud cloud.Cloud
- nodeCaps []csi.NodeServiceCapability_RPC_Type
- volMetricsOptIn bool
- volMetricsRefreshPeriod float64
- volMetricsFsRateLimit int
- volStatter VolStatter
- gidAllocator GidAllocator
- deleteAccessPointRootDir bool
- adaptiveRetryMode bool
- tags map[string]string
- lockManager LockManagerMap
+ endpoint string
+ nodeID string
+ srv *grpc.Server
+ mounter Mounter
+ efsWatchdog Watchdog
+ cloud cloud.Cloud
+ nodeCaps []csi.NodeServiceCapability_RPC_Type
+ volMetricsOptIn bool
+ volMetricsRefreshPeriod float64
+ volMetricsFsRateLimit int
+ volStatter VolStatter
+ provisioners map[string]Provisioner
}
-func NewDriver(endpoint, efsUtilsCfgPath, efsUtilsStaticFilesPath, tags string, volMetricsOptIn bool, volMetricsRefreshPeriod float64, volMetricsFsRateLimit int, deleteAccessPointRootDir bool, adaptiveRetryMode bool) *Driver {
+func NewDriver(endpoint, efsUtilsCfgPath, efsUtilsStaticFilesPath, tags string, volMetricsOptIn bool, volMetricsRefreshPeriod float64, volMetricsFsRateLimit int, deleteAccessPointRootDir bool, adaptiveRetryMode bool, deleteProvisionedDir bool) *Driver {
cloud, err := cloud.NewCloud(adaptiveRetryMode)
if err != nil {
klog.Fatalln(err)
@@ -64,22 +60,21 @@ func NewDriver(endpoint, efsUtilsCfgPath, efsUtilsStaticFilesPath, tags string,
nodeCaps := SetNodeCapOptInFeatures(volMetricsOptIn)
watchdog := newExecWatchdog(efsUtilsCfgPath, efsUtilsStaticFilesPath, "amazon-efs-mount-watchdog")
+ mounter := newNodeMounter()
+ parsedTags := parseTagsFromStr(strings.TrimSpace(tags))
+ provisioners := getProvisioners(cloud, mounter, parsedTags, deleteAccessPointRootDir, adaptiveRetryMode, &RealOsClient{}, deleteProvisionedDir)
return &Driver{
- endpoint: endpoint,
- nodeID: cloud.GetMetadata().GetInstanceID(),
- mounter: newNodeMounter(),
- efsWatchdog: watchdog,
- cloud: cloud,
- nodeCaps: nodeCaps,
- volStatter: NewVolStatter(),
- volMetricsOptIn: volMetricsOptIn,
- volMetricsRefreshPeriod: volMetricsRefreshPeriod,
- volMetricsFsRateLimit: volMetricsFsRateLimit,
- gidAllocator: NewGidAllocator(),
- deleteAccessPointRootDir: deleteAccessPointRootDir,
- adaptiveRetryMode: adaptiveRetryMode,
- tags: parseTagsFromStr(strings.TrimSpace(tags)),
- lockManager: NewLockManagerMap(),
+ endpoint: endpoint,
+ nodeID: cloud.GetMetadata().GetInstanceID(),
+ mounter: mounter,
+ efsWatchdog: watchdog,
+ cloud: cloud,
+ nodeCaps: nodeCaps,
+ volStatter: NewVolStatter(),
+ volMetricsOptIn: volMetricsOptIn,
+ volMetricsRefreshPeriod: volMetricsRefreshPeriod,
+ volMetricsFsRateLimit: volMetricsFsRateLimit,
+ provisioners: provisioners,
}
}
@@ -142,6 +137,14 @@ func (d *Driver) Run() error {
return d.srv.Serve(listener)
}
+func (d *Driver) getProvisioningModes() []string {
+ var keys []string
+ for k := range d.provisioners {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
func parseTagsFromStr(tagStr string) map[string]string {
defer func() {
if r := recover(); r != nil {
diff --git a/pkg/driver/gid_allocator.go b/pkg/driver/gid_allocator.go
index 97348a2c4..961239f82 100644
--- a/pkg/driver/gid_allocator.go
+++ b/pkg/driver/gid_allocator.go
@@ -20,8 +20,8 @@ type GidAllocator struct {
mu sync.Mutex
}
-func NewGidAllocator() GidAllocator {
- return GidAllocator{}
+func NewGidAllocator() *GidAllocator {
+ return &GidAllocator{}
}
// Retrieves the next available GID
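
A brief note on why NewGidAllocator now returns a pointer: GidAllocator carries a sync.Mutex, and returning the struct by value hands each caller an independent copy of that lock, so callers stop synchronizing with one another (and `go vet`'s copylocks check flags the copy). A minimal sketch of the sharing behaviour the pointer return gives; the helper name below is illustrative only.

// Sketch only (not part of this patch).
func allocatorSharingSketch() {
	a := NewGidAllocator() // *GidAllocator: all callers share one lock
	b := a                 // copies the pointer, not the mutex; a and b guard the same state
	_, _ = a, b
	// With the old value return, `c := *a` would copy the embedded sync.Mutex,
	// which go vet reports and which silently breaks mutual exclusion.
}
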
diff --git a/pkg/driver/os_client.go b/pkg/driver/os_client.go
new file mode 100644
index 000000000..435abd28c
--- /dev/null
+++ b/pkg/driver/os_client.go
@@ -0,0 +1,113 @@
+package driver
+
+import "os"
+
+type OsClient interface {
+ MkDirAllWithPerms(path string, perms os.FileMode, uid, gid int) error
+ MkDirAllWithPermsNoOwnership(path string, perms os.FileMode) error
+ GetPerms(path string) (os.FileMode, error)
+ Remove(path string) error
+ RemoveAll(path string) error
+}
+
+// Real OsClient
+
+type RealOsClient struct{}
+
+func NewOsClient() *RealOsClient {
+ return &RealOsClient{}
+}
+
+func (o *RealOsClient) MkDirAllWithPerms(path string, perms os.FileMode, uid, gid int) error {
+ err := os.MkdirAll(path, perms)
+ if err != nil {
+ return err
+ }
+ // The extra chmod guarantees we get the permissions we want, in spite of the umask
+ err = os.Chmod(path, perms)
+ if err != nil {
+ return err
+ }
+ err = os.Chown(path, uid, gid)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (o *RealOsClient) MkDirAllWithPermsNoOwnership(path string, perms os.FileMode) error {
+ err := os.MkdirAll(path, perms)
+ if err != nil {
+ return err
+ }
+ // The extra chmod guarantees we get the permissions we want, in spite of the umask
+ err = os.Chmod(path, perms)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (o *RealOsClient) Remove(path string) error {
+ return os.Remove(path)
+}
+
+func (o *RealOsClient) RemoveAll(path string) error {
+ return os.RemoveAll(path)
+}
+
+func (o *RealOsClient) GetPerms(path string) (os.FileMode, error) {
+ fInfo, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ return fInfo.Mode(), nil
+}
+
+// Fake OsClient
+
+type FakeOsClient struct{}
+
+func (o *FakeOsClient) MkDirAllWithPerms(_ string, _ os.FileMode, _, _ int) error {
+ return nil
+}
+
+func (o *FakeOsClient) MkDirAllWithPermsNoOwnership(_ string, _ os.FileMode) error {
+ return nil
+}
+
+func (o *FakeOsClient) Remove(_ string) error {
+ return nil
+}
+
+func (o *FakeOsClient) RemoveAll(_ string) error {
+ return nil
+}
+
+func (o *FakeOsClient) GetPerms(_ string) (os.FileMode, error) {
+ return 0, nil
+}
+
+// Broken OsClient
+
+type BrokenOsClient struct{}
+
+func (o *BrokenOsClient) MkDirAllWithPerms(_ string, _ os.FileMode, _, _ int) error {
+ return &os.PathError{}
+}
+
+func (o *BrokenOsClient) MkDirAllWithPermsNoOwnership(_ string, _ os.FileMode) error {
+ return &os.PathError{}
+}
+
+func (o *BrokenOsClient) Remove(_ string) error {
+ return &os.PathError{}
+}
+
+func (o *BrokenOsClient) RemoveAll(_ string) error {
+ return &os.PathError{}
+}
+
+func (o *BrokenOsClient) GetPerms(_ string) (os.FileMode, error) {
+ return 0, &os.PathError{}
+}
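
Not part of this patch, but a cheap guard worth considering alongside the three implementations: compile-time assertions that each one satisfies OsClient, so an interface change fails the build at the definition site rather than at a distant call site.

// Illustrative compile-time checks; these lines are an assumption, not code from the patch.
var (
	_ OsClient = (*RealOsClient)(nil)
	_ OsClient = (*FakeOsClient)(nil)
	_ OsClient = (*BrokenOsClient)(nil)
)
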
diff --git a/pkg/driver/provisioner.go b/pkg/driver/provisioner.go
new file mode 100644
index 000000000..f04d27ee2
--- /dev/null
+++ b/pkg/driver/provisioner.go
@@ -0,0 +1,80 @@
+package driver
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/container-storage-interface/spec/lib/go/csi"
+ "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+type Provisioner interface {
+ Provision(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.Volume, error)
+ Delete(ctx context.Context, req *csi.DeleteVolumeRequest) error
+}
+
+type BaseProvisioner struct {
+ cloud cloud.Cloud
+ mounter Mounter
+ adaptiveRetryMode bool
+ lockManager LockManagerMap
+}
+
+func getProvisioners(cloud cloud.Cloud, mounter Mounter, tags map[string]string, deleteAccessPointRootDir bool, adaptiveRetryMode bool, osClient OsClient, deleteProvisionedDir bool) map[string]Provisioner {
+ return map[string]Provisioner{
+ AccessPointMode: &AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: cloud,
+ mounter: mounter,
+ adaptiveRetryMode: adaptiveRetryMode,
+ lockManager: NewLockManagerMap(),
+ },
+ tags: tags,
+ gidAllocator: NewGidAllocator(),
+ deleteAccessPointRootDir: deleteAccessPointRootDir,
+ },
+ DirectoryMode: &DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: cloud,
+ mounter: mounter,
+ },
+ osClient: osClient,
+ deleteProvisionedDir: deleteProvisionedDir,
+ },
+ }
+}
+
+func getCloud(secrets map[string]string, driverCloud cloud.Cloud, adaptiveRetryMode bool) (cloud.Cloud, string, bool, error) {
+
+ var localCloud cloud.Cloud
+ var roleArn string
+ var crossAccountDNSEnabled bool
+ var err error
+
+ // Fetch the AWS role ARN for cross-account mount from CSI secrets. Link to CSI secrets below:
+ // https://kubernetes-csi.github.io/docs/secrets-and-credentials.html#csi-operation-secrets
+ if value, ok := secrets[RoleArn]; ok {
+ roleArn = value
+ }
+ if value, ok := secrets[CrossAccount]; ok {
+ crossAccountDNSEnabled, err = strconv.ParseBool(value)
+ if err != nil {
+ return nil, "", false, status.Error(codes.InvalidArgument, "crossaccount parameter must have a boolean value.")
+ }
+ } else {
+ crossAccountDNSEnabled = false
+ }
+
+ if roleArn != "" {
+ localCloud, err = cloud.NewCloudWithRole(roleArn, adaptiveRetryMode)
+ if err != nil {
+ return nil, "", false, status.Errorf(codes.Unauthenticated, "Unable to initialize aws cloud: %v. Please verify role has the correct AWS permissions for cross account mount", err)
+ }
+ } else {
+ localCloud = driverCloud
+ }
+
+ return localCloud, roleArn, crossAccountDNSEnabled, nil
+}
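
The controller-side lookup that consumes this provisioner map sits outside this hunk. The sketch below shows one way CreateVolume/DeleteVolume could resolve a Provisioner from the provisioningMode storage class parameter; the helper name and the default-to-access-point fallback are assumptions, not code from this patch.

// Illustrative sketch only.
func (d *Driver) provisionerFor(params map[string]string) (Provisioner, error) {
	mode, ok := params[ProvisioningMode]
	if !ok {
		mode = AccessPointMode // assumption: fall back to access point provisioning
	}
	p, ok := d.provisioners[mode]
	if !ok {
		return nil, status.Errorf(codes.InvalidArgument,
			"Provisioning mode %s is not supported. Supported modes: %v", mode, d.getProvisioningModes())
	}
	return p, nil
}
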
diff --git a/pkg/driver/provisioner_ap.go b/pkg/driver/provisioner_ap.go
new file mode 100644
index 000000000..193c93c38
--- /dev/null
+++ b/pkg/driver/provisioner_ap.go
@@ -0,0 +1,447 @@
+package driver
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+
+ "github.com/container-storage-interface/spec/lib/go/csi"
+ "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "k8s.io/klog/v2"
+)
+
+type AccessPointProvisioner struct {
+ BaseProvisioner
+ tags map[string]string
+ gidAllocator *GidAllocator
+ deleteAccessPointRootDir bool
+}
+
+func (a *AccessPointProvisioner) Provision(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.Volume, error) {
+ var reuseAccessPoint bool
+ var err error
+ volumeParams := req.GetParameters()
+ volName := req.GetName()
+ clientToken := volName
+
+ // If true, use the sha256 hash of pvcName as clientToken instead of the PVC Id.
+ // This allows users to reconnect to the same AP from a different k8s cluster.
+ if reuseAccessPointStr, ok := volumeParams[ReuseAccessPointKey]; ok {
+ reuseAccessPoint, err = strconv.ParseBool(reuseAccessPointStr)
+ if err != nil {
+ return nil, status.Error(codes.InvalidArgument, "Invalid value for reuseAccessPoint parameter")
+ }
+ if reuseAccessPoint {
+ clientToken = get64LenHash(volumeParams[PvcNameKey])
+ klog.V(5).Infof("Client token : %s", clientToken)
+ }
+ }
+ if volName == "" {
+ return nil, status.Error(codes.InvalidArgument, "Volume name not provided")
+ }
+
+ // Volume size is required to match PV to PVC by k8s.
+ // Volume size is not consumed by EFS for any purposes.
+ volSize := req.GetCapacityRange().GetRequiredBytes()
+
+ var (
+ azName string
+ basePath string
+ gid int64
+ gidMin int64
+ gidMax int64
+ localCloud cloud.Cloud
+ roleArn string
+ uid int64
+ crossAccountDNSEnabled bool
+ )
+
+ accessPointsOptions := &cloud.AccessPointOptions{
+ CapacityGiB: volSize,
+ }
+
+ if value, ok := volumeParams[FsId]; ok {
+ if strings.TrimSpace(value) == "" {
+ return nil, status.Errorf(codes.InvalidArgument, "Parameter %v cannot be empty", FsId)
+ }
+ accessPointsOptions.FileSystemId = value
+ } else {
+ return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", FsId)
+ }
+
+ localCloud, roleArn, crossAccountDNSEnabled, err = getCloud(req.GetSecrets(), a.cloud, a.adaptiveRetryMode)
+ if err != nil {
+ return nil, err
+ }
+
+ var accessPoint *cloud.AccessPoint
+ // If reuseAccessPoint is true, check whether an AP with the same root directory already exists in EFS.
+ // If found, reuse that AP.
+ if reuseAccessPoint {
+ existingAP, err := localCloud.FindAccessPointByClientToken(ctx, clientToken, accessPointsOptions.FileSystemId)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find access point: %v", err)
+ }
+ if existingAP != nil {
+ //AP path already exists
+ klog.V(2).Infof("Existing AccessPoint found : %+v", existingAP)
+ accessPoint = &cloud.AccessPoint{
+ AccessPointId: existingAP.AccessPointId,
+ FileSystemId: existingAP.FileSystemId,
+ CapacityGiB: accessPointsOptions.CapacityGiB,
+ }
+
+ // Take the lock to prevent this access point from being deleted while the volume is being created
+ if a.lockManager.lockMutex(accessPoint.AccessPointId, ApLockWaitTimeSec*time.Second) {
+ defer a.lockManager.unlockMutex(accessPoint.AccessPointId)
+ } else {
+ return nil, status.Errorf(codes.Internal, "Could not take the lock for existing access point: %v", accessPoint.AccessPointId)
+ }
+ }
+ }
+
+ if accessPoint == nil {
+ // Create tags
+ tags := map[string]string{
+ DefaultTagKey: DefaultTagValue,
+ }
+
+ // Append input tags to default tag
+ if len(a.tags) != 0 {
+ for k, v := range a.tags {
+ tags[k] = v
+ }
+ }
+
+ accessPointsOptions.Tags = tags
+
+ uid = -1
+ if value, ok := volumeParams[Uid]; ok {
+ uid, err = strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Uid, err)
+ }
+ if uid < 0 {
+ return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than or equal to 0", Uid)
+ }
+ }
+
+ gid = -1
+ if value, ok := volumeParams[Gid]; ok {
+ gid, err = strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Gid, err)
+ }
+ if gid < 0 {
+ return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than or equal to 0", Gid)
+ }
+ }
+
+ if value, ok := volumeParams[GidMin]; ok {
+ gidMin, err = strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", GidMin, err)
+ }
+ if gidMin <= 0 {
+ return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than 0", GidMin)
+ }
+ }
+
+ if value, ok := volumeParams[GidMax]; ok {
+ // Ensure GID min is provided with GID max
+ if gidMin == 0 {
+ return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", GidMin)
+ }
+ gidMax, err = strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", GidMax, err)
+ }
+ if gidMax <= gidMin {
+ return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than %v", GidMax, GidMin)
+ }
+ } else {
+ // Ensure GID max is provided with GID min
+ if gidMin != 0 {
+ return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", GidMax)
+ }
+ }
+
+ // Assign default GID ranges if not provided
+ if gidMin == 0 && gidMax == 0 {
+ gidMin = DefaultGidMin
+ gidMax = DefaultGidMax
+ }
+
+ if value, ok := volumeParams[DirectoryPerms]; ok {
+ accessPointsOptions.DirectoryPerms = value
+ }
+
+ // The storage class parameter `az` is used to fetch the preferred mount target for cross-account mounts.
+ // If the `az` storage class parameter is not provided, a random mount target will be picked for mounting.
+ // This storage class parameter is different from the `az` mount option provided by efs-utils https://github.com/aws/efs-utils/blob/v1.31.1/src/mount_efs/__init__.py#L195
+ // The `az` mount option provided by efs-utils is used for cross-AZ mounts, or to provide the AZ of an EFS One Zone file system mount within the same AWS account.
+ // To make use of the `az` mount option, add it under the storage class's `mountOptions` section. https://kubernetes.io/docs/concepts/storage/storage-classes/#mount-options
+ if value, ok := volumeParams[AzName]; ok {
+ azName = value
+ }
+
+ // Check if file system exists. Describe FS or List APs handle appropriate error codes
+ // With dynamic uid/gid provisioning we can save a call to describe FS, as list APs fails if FS ID does not exist
+ var accessPoints []*cloud.AccessPoint
+ if uid == -1 || gid == -1 {
+ accessPoints, err = localCloud.ListAccessPoints(ctx, accessPointsOptions.FileSystemId)
+ } else {
+ _, err = localCloud.DescribeFileSystem(ctx, accessPointsOptions.FileSystemId)
+ }
+ if err != nil {
+ if err == cloud.ErrAccessDenied {
+ return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
+ }
+ if err == cloud.ErrNotFound {
+ return nil, status.Errorf(codes.InvalidArgument, "File System does not exist: %v", err)
+ }
+ return nil, status.Errorf(codes.Internal, "Failed to fetch Access Points or Describe File System: %v", err)
+ }
+
+ var allocatedGid int64
+ if uid == -1 || gid == -1 {
+ allocatedGid, err = a.gidAllocator.getNextGid(accessPointsOptions.FileSystemId, accessPoints, gidMin, gidMax)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if uid == -1 {
+ uid = allocatedGid
+ }
+ if gid == -1 {
+ gid = allocatedGid
+ }
+
+ if value, ok := volumeParams[BasePath]; ok {
+ basePath = value
+ }
+
+ rootDirName := volName
+ // Check if a custom structure should be imposed on the access point directory
+ if value, ok := volumeParams[SubPathPattern]; ok {
+ // Try to construct the root directory and check that it only contains supported components
+ val, err := interpolateRootDirectoryName(value, volumeParams)
+ if err == nil {
+ klog.Infof("Using user-specified structure for access point directory.")
+ rootDirName = val
+ if value, ok := volumeParams[EnsureUniqueDirectory]; ok {
+ if ensureUniqueDirectory, err := strconv.ParseBool(value); !ensureUniqueDirectory && err == nil {
+ klog.Infof("Not appending PVC UID to path.")
+ } else {
+ klog.Infof("Appending PVC UID to path.")
+ rootDirName = fmt.Sprintf("%s-%s", val, uuid.New().String())
+ }
+ } else {
+ klog.Infof("Appending PVC UID to path.")
+ rootDirName = fmt.Sprintf("%s-%s", val, uuid.New().String())
+ }
+ } else {
+ return nil, err
+ }
+ } else {
+ klog.Infof("Using PV name for access point directory.")
+ }
+
+ rootDir := path.Join("/", basePath, rootDirName)
+ if ok, err := validateEfsPathRequirements(rootDir); !ok {
+ return nil, err
+ }
+ klog.Infof("Using %v as the access point directory.", rootDir)
+
+ accessPointsOptions.Uid = uid
+ accessPointsOptions.Gid = gid
+ accessPointsOptions.DirectoryPath = rootDir
+
+ accessPoint, err = localCloud.CreateAccessPoint(ctx, clientToken, accessPointsOptions)
+ if err != nil {
+ if err == cloud.ErrAccessDenied {
+ return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
+ }
+ if err == cloud.ErrAlreadyExists {
+ return nil, status.Errorf(codes.AlreadyExists, "Access Point already exists")
+ }
+ return nil, status.Errorf(codes.Internal, "Failed to create Access point in File System %v : %v", accessPointsOptions.FileSystemId, err)
+ }
+
+ // Lock on the new access point to prevent accidental deletion before creation is done
+ if a.lockManager.lockMutex(accessPoint.AccessPointId, ApLockWaitTimeSec*time.Second) {
+ defer a.lockManager.unlockMutex(accessPoint.AccessPointId)
+ } else {
+ return nil, status.Errorf(codes.Internal, "Could not take the lock after creating access point: %v", accessPoint.AccessPointId)
+ }
+ }
+
+ volContext := map[string]string{}
+
+ // Enable cross-account DNS resolution or fetch the mount target IP for cross-account mounts
+ if roleArn != "" {
+ if crossAccountDNSEnabled {
+ // This option indicates the customer would like to use DNS to resolve
+ // the cross-account mount target ip address (in order to mount to
+ // the same AZ-ID as the client instance); mounttargetip should
+ // not be used as a mount option in this case.
+ volContext[CrossAccount] = strconv.FormatBool(true)
+ } else {
+ mountTarget, err := localCloud.DescribeMountTargets(ctx, accessPointsOptions.FileSystemId, azName)
+ if err != nil {
+ klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", accessPointsOptions.FileSystemId, err)
+ } else {
+ volContext[MountTargetIp] = mountTarget.IPAddress
+ }
+
+ }
+ }
+
+ return &csi.Volume{
+ CapacityBytes: volSize,
+ VolumeId: accessPointsOptions.FileSystemId + "::" + accessPoint.AccessPointId,
+ VolumeContext: volContext,
+ }, nil
+}
+
+func (a *AccessPointProvisioner) Delete(ctx context.Context, req *csi.DeleteVolumeRequest) error {
+ var (
+ localCloud cloud.Cloud
+ roleArn string
+ crossAccountDNSEnabled bool
+ err error
+ )
+
+ localCloud, roleArn, crossAccountDNSEnabled, err = getCloud(req.GetSecrets(), a.cloud, a.adaptiveRetryMode)
+ if err != nil {
+ return err
+ }
+
+ volId := req.GetVolumeId()
+ fileSystemId, _, accessPointId, _ := parseVolumeId(volId)
+
+ // Lock on the access point ID to ensure a retry won't race with the in-progress deletion
+ if a.lockManager.lockMutex(accessPointId, ApLockWaitTimeSec*time.Second) {
+ defer a.lockManager.unlockMutex(accessPointId)
+ } else {
+ return status.Errorf(codes.Internal, "Could not take the lock to delete access point: %v", accessPointId)
+ }
+
+ // Delete access point root directory if delete-access-point-root-dir is set.
+ if a.deleteAccessPointRootDir {
+ fsRoot := TempMountPathPrefix + "/" + accessPointId
+ deleteCompleted := false
+
+ // Ensure the volume is cleaned up properly in case of an incomplete deletion
+ defer func() {
+ if !deleteCompleted {
+ // Check if the FS is still mounted
+ isNotMounted, err := a.mounter.IsLikelyNotMountPoint(fsRoot)
+ if err != nil {
+ return // Skip cleanup, we can't verify mount status
+ }
+
+ if !isNotMounted {
+ if err := a.mounter.Unmount(fsRoot); err != nil {
+ klog.Warningf("Failed to unmount %v: %v", fsRoot, err)
+ return // Don't remove any data if the unmount fails
+ }
+ }
+
+ // Only try folder removal if the unmount succeeded or wasn't mounted
+ // If the directory already doesn't exist it will be treated as success
+ if err := os.Remove(fsRoot); err != nil && !os.IsNotExist(err) {
+ klog.Warningf("Failed to remove %v: %v", fsRoot, err)
+ }
+ }
+ }()
+
+ // Check if the Access Point exists.
+ // If it exists, retrieve its root directory and delete it.
+ accessPoint, err := localCloud.DescribeAccessPoint(ctx, accessPointId)
+ if err != nil {
+ if err == cloud.ErrAccessDenied {
+ return status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
+ }
+ if err == cloud.ErrNotFound {
+ klog.V(5).Infof("DeleteVolume: Access Point %v not found, returning success", accessPointId)
+ return nil
+ }
+ return status.Errorf(codes.Internal, "Could not describe Access Point %v, error: %v", accessPointId, err)
+ }
+
+ // Mount the File System at its root and delete the access point root directory
+ mountOptions := []string{"tls", "iam"}
+ if roleArn != "" {
+ if crossAccountDNSEnabled {
+ // Connect via dns rather than mounttargetip
+ mountOptions = append(mountOptions, CrossAccount)
+ } else {
+ mountTarget, err := localCloud.DescribeMountTargets(ctx, fileSystemId, "")
+ if err == nil {
+ mountOptions = append(mountOptions, MountTargetIp+"="+mountTarget.IPAddress)
+ } else {
+ klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", fileSystemId, err)
+ }
+ }
+ }
+
+ // Create the target directory; this won't fail if it already exists
+ if err := a.mounter.MakeDir(fsRoot); err != nil {
+ return status.Errorf(codes.Internal, "Could not create dir %q: %v", fsRoot, err)
+ }
+
+ // Only attempt to mount the target file system if it's not already mounted
+ isNotMounted, err := a.mounter.IsLikelyNotMountPoint(fsRoot)
+ if err != nil {
+ return status.Errorf(codes.Internal, "Could not check if %q is mounted: %v", fsRoot, err)
+ }
+ if isNotMounted {
+ if err := a.mounter.Mount(fileSystemId, fsRoot, "efs", mountOptions); err != nil {
+ return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", fileSystemId, fsRoot, err)
+ }
+ }
+
+ // Before removing, ensure the removal path exists and is a directory
+ apRootPath := fsRoot + accessPoint.AccessPointRootDir
+ if pathInfo, statErr := a.mounter.Stat(apRootPath); statErr == nil && pathInfo.IsDir() {
+ // Remove the access point root directory and all of its contents; do not shadow the error so failures are reported
+ if err := os.RemoveAll(apRootPath); err != nil {
+ return status.Errorf(codes.Internal, "Could not delete access point root directory %q: %v", accessPoint.AccessPointRootDir, err)
+ }
+ }
+ err = a.mounter.Unmount(fsRoot)
+ if err != nil {
+ return status.Errorf(codes.Internal, "Could not unmount %q: %v", fsRoot, err)
+ }
+ err = os.Remove(fsRoot)
+ if err != nil && !os.IsNotExist(err) {
+ return status.Errorf(codes.Internal, "Could not delete %q: %v", fsRoot, err)
+ }
+
+ // Mark the delete as complete; nothing needs cleanup in the deferred function
+ deleteCompleted = true
+ }
+
+ // Delete access point
+ if err = localCloud.DeleteAccessPoint(ctx, accessPointId); err != nil {
+ if err == cloud.ErrAccessDenied {
+ return status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
+ }
+ if err == cloud.ErrNotFound {
+ klog.V(5).Infof("DeleteVolume: Access Point not found, returning success")
+ return nil
+ }
+ return status.Errorf(codes.Internal, "Failed to Delete volume %v: %v", volId, err)
+ }
+
+ return nil
+}
diff --git a/pkg/driver/provisioner_ap_test.go b/pkg/driver/provisioner_ap_test.go
new file mode 100644
index 000000000..9286c4917
--- /dev/null
+++ b/pkg/driver/provisioner_ap_test.go
@@ -0,0 +1,670 @@
+package driver
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/container-storage-interface/spec/lib/go/csi"
+ "github.com/golang/mock/gomock"
+
+ "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud"
+ "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/driver/mocks"
+)
+
+func TestAccessPointProvisioner_Provision(t *testing.T) {
+ var (
+ fsId = "fs-abcd1234"
+ volumeName = "volumeName"
+ capacityRange int64 = 5368709120
+ stdVolCap = &csi.VolumeCapability{
+ AccessType: &csi.VolumeCapability_Mount{
+ Mount: &csi.VolumeCapability_MountVolume{},
+ },
+ AccessMode: &csi.VolumeCapability_AccessMode{
+ Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
+ },
+ }
+ )
+ tests := []struct {
+ name string
+ testFunc func(t *testing.T)
+ }{
+ {
+ name: "Fail: File system does not exist",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ ctx := context.Background()
+ mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(nil, cloud.ErrNotFound)
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: "efs-ap",
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ },
+ tags: map[string]string{},
+ }
+
+ _, err := apProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Provision to fail but it didn't")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: DescribeFileSystem Access Denied",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ ctx := context.Background()
+ mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(nil, cloud.ErrAccessDenied)
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: "efs-ap",
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ },
+ tags: map[string]string{},
+ }
+
+ _, err := apProv.Provision(ctx, req)
+ if err == nil {
+ t.Fatal("Expected Provision to fail but it didn't")
+ }
+
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: Describe File system call fails",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ ctx := context.Background()
+ mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(nil, errors.New("DescribeFileSystem failed"))
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: "efs-ap",
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ },
+ tags: map[string]string{},
+ }
+
+ _, err := apProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Provision to fail but it didn't")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: Create Access Point call fails",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ ctx := context.Background()
+ fileSystem := &cloud.FileSystem{
+ FileSystemId: fsId,
+ }
+
+ mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(fileSystem, nil)
+ mockCloud.EXPECT().CreateAccessPoint(gomock.Eq(ctx), gomock.Any(), gomock.Any()).Return(nil, errors.New("CreateAccessPoint call failed"))
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: "efs-ap",
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ },
+ tags: map[string]string{},
+ }
+
+ _, err := apProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Provision to fail but it didn't")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: CreateAccessPoint Access Denied",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+
+ ctx := context.Background()
+ fileSystem := &cloud.FileSystem{
+ FileSystemId: fsId,
+ }
+ mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(fileSystem, nil)
+ mockCloud.EXPECT().CreateAccessPoint(gomock.Eq(ctx), gomock.Any(), gomock.Any()).Return(nil, cloud.ErrAccessDenied)
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: "efs-ap",
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ },
+ tags: map[string]string{},
+ }
+
+ _, err := apProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Provision to fail but it didn't")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: Cannot assume role for x-account",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+
+ secrets := map[string]string{}
+ secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole"
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: "efs-ap",
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ AzName: "us-east-1a",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ Secrets: secrets,
+ }
+
+ ctx := context.Background()
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ },
+ tags: map[string]string{},
+ }
+
+ _, err := apProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Provision to fail but it didn't")
+ }
+
+ mockCtl.Finish()
+ },
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, test.testFunc)
+ }
+}
+
+func TestAccessPointProvisioner_Delete(t *testing.T) {
+ var (
+ fsId = "fs-abcd1234"
+ apId = "fsap-abcd1234xyz987"
+ volumeId = fmt.Sprintf("%s::%s", fsId, apId)
+ )
+
+ tests := []struct {
+ name string
+ testFunc func(t *testing.T)
+ }{
+ {
+ name: "Success: Setting deleteAccessPointRootDir causes rootDir to be deleted",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ accessPoint := &cloud.AccessPoint{
+ AccessPointId: apId,
+ FileSystemId: fsId,
+ AccessPointRootDir: "",
+ CapacityGiB: 0,
+ }
+
+ dirPresent := mocks.NewMockFileInfo(
+ "testFile",
+ 0,
+ 0755,
+ time.Now(),
+ true,
+ nil,
+ )
+
+ ctx := context.Background()
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil)
+ mockMounter.EXPECT().Stat(gomock.Any()).Return(dirPresent, nil)
+ mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(accessPoint, nil)
+ mockCloud.EXPECT().DeleteAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(nil)
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ mounter: mockMounter,
+ },
+ tags: map[string]string{},
+ deleteAccessPointRootDir: true,
+ }
+
+ err := apProv.Delete(ctx, req)
+
+ if err != nil {
+ t.Fatalf("Expected Delete to succeed but it failed: %v", err)
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Success: If AccessPoint does not exist success is returned as no work needs to be done",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ ctx := context.Background()
+ mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(nil, cloud.ErrNotFound)
+ mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil)
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ mounter: mockMounter,
+ },
+ tags: map[string]string{},
+ deleteAccessPointRootDir: true,
+ }
+
+ err := apProv.Delete(ctx, req)
+
+ if err != nil {
+ t.Fatalf("Expected Delete to succeed but it failed: %v", err)
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: Return error if AccessDenied error from AWS",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ ctx := context.Background()
+ mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(nil, cloud.ErrAccessDenied)
+ mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil)
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ mounter: mockMounter,
+ },
+ tags: map[string]string{},
+ deleteAccessPointRootDir: true,
+ }
+
+ err := apProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Delete to fail but it succeeded")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: Return error if DescribeAccessPoints failed",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ ctx := context.Background()
+ mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(nil, errors.New("Describe Access Point failed"))
+ mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil)
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ mounter: mockMounter,
+ },
+ tags: map[string]string{},
+ deleteAccessPointRootDir: true,
+ }
+
+ err := apProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Delete to fail but it succeeded")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: Fail to make directory for access point mount",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ accessPoint := &cloud.AccessPoint{
+ AccessPointId: apId,
+ FileSystemId: fsId,
+ AccessPointRootDir: "",
+ CapacityGiB: 0,
+ }
+
+ ctx := context.Background()
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(errors.New("Failed to makeDir"))
+ mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil)
+ mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(accessPoint, nil)
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ mounter: mockMounter,
+ },
+ tags: map[string]string{},
+ deleteAccessPointRootDir: true,
+ }
+
+ err := apProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Delete to fail but it succeeded")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: Fail to mount file system on directory for access point root directory removal",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ accessPoint := &cloud.AccessPoint{
+ AccessPointId: apId,
+ FileSystemId: fsId,
+ AccessPointRootDir: "",
+ CapacityGiB: 0,
+ }
+
+ ctx := context.Background()
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("Failed to mount"))
+ mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil).Times(2)
+ mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(accessPoint, nil)
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ mounter: mockMounter,
+ },
+ tags: map[string]string{},
+ deleteAccessPointRootDir: true,
+ }
+
+ err := apProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Delete to fail but it succeeded")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: Fail to unmount file system after access point root directory removal",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ accessPoint := &cloud.AccessPoint{
+ AccessPointId: apId,
+ FileSystemId: fsId,
+ AccessPointRootDir: "",
+ CapacityGiB: 0,
+ }
+
+ dirPresent := mocks.NewMockFileInfo(
+ "testFile",
+ 0,
+ 0755,
+ time.Now(),
+ true,
+ nil,
+ )
+
+ ctx := context.Background()
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(errors.New("Failed to unmount"))
+ mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil).Times(2)
+ mockMounter.EXPECT().Stat(gomock.Any()).Return(dirPresent, nil)
+ mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(accessPoint, nil)
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ mounter: mockMounter,
+ },
+ tags: map[string]string{},
+ deleteAccessPointRootDir: true,
+ }
+
+ err := apProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Delete to fail but it succeeded")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: DeleteAccessPoint access denied",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ ctx := context.Background()
+ mockCloud.EXPECT().DeleteAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(cloud.ErrAccessDenied)
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ },
+ tags: map[string]string{},
+ }
+
+ err := apProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Delete to fail but it succeeded")
+ }
+ mockCtl.Finish()
+ },
+ },
+ {
+ name: "Fail: Cannot assume role for x-account",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ secrets := map[string]string{}
+ secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole"
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ Secrets: secrets,
+ }
+
+ ctx := context.Background()
+ mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil)
+
+ apProv := AccessPointProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: mockCloud,
+ mounter: mockMounter,
+ },
+ tags: map[string]string{},
+ deleteAccessPointRootDir: true,
+ }
+
+ err := apProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected Delete to fail but it succeeded")
+ }
+ mockCtl.Finish()
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, test.testFunc)
+ }
+}
diff --git a/pkg/driver/provisioner_dir.go b/pkg/driver/provisioner_dir.go
new file mode 100644
index 000000000..a8f5525f1
--- /dev/null
+++ b/pkg/driver/provisioner_dir.go
@@ -0,0 +1,229 @@
+package driver
+
+import (
+ "context"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+
+ "github.com/google/uuid"
+
+ "github.com/container-storage-interface/spec/lib/go/csi"
+ "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "k8s.io/klog/v2"
+)
+
+type DirectoryProvisioner struct {
+ BaseProvisioner
+ osClient OsClient
+ deleteProvisionedDir bool
+}
+
+func (d *DirectoryProvisioner) Provision(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.Volume, error) {
+ var provisionedPath string
+
+ var fileSystemId string
+ volumeParams := req.GetParameters()
+ if value, ok := volumeParams[FsId]; ok {
+ if strings.TrimSpace(value) == "" {
+ return nil, status.Errorf(codes.InvalidArgument, "Parameter %v cannot be empty", FsId)
+ }
+ fileSystemId = value
+ } else {
+ return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", FsId)
+ }
+ klog.V(5).Infof("Provisioning directory on FileSystem %s...", fileSystemId)
+
+ localCloud, roleArn, crossAccountDNSEnabled, err := getCloud(req.GetSecrets(), d.cloud, d.adaptiveRetryMode)
+ if err != nil {
+ return nil, err
+ }
+
+ var azName string
+ if value, ok := volumeParams[AzName]; ok {
+ azName = value
+ }
+
+ mountOptions, mountTargetAddress, err := getMountOptions(ctx, localCloud, fileSystemId, roleArn, crossAccountDNSEnabled, azName)
+ if err != nil {
+ return nil, err
+ }
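+ // Temporarily mount the file system on the node at a unique path so the directory can be
+ // created with the requested permissions and ownership; it is unmounted and removed again below.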
+ target := TempMountPathPrefix + "/" + uuid.New().String()
+ if err := d.mounter.MakeDir(target); err != nil {
+ return nil, status.Errorf(codes.Internal, "Could not create dir %q: %v", target, err)
+ }
+ if err := d.mounter.Mount(fileSystemId, target, "efs", mountOptions); err != nil {
+ return nil, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", fileSystemId, target, err)
+ }
+ // Extract the basePath
+ var basePath string
+ if value, ok := volumeParams[BasePath]; ok {
+ basePath = value
+ }
+
+ rootDirName := req.Name
+ provisionedPath = path.Join(basePath, rootDirName)
+
+ klog.V(5).Infof("Provisioning directory at path %s", provisionedPath)
+
+ // Grab the required permissions
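+ // If directoryPerms cannot be parsed as an octal value, the default of 0777 is kept rather than failing the request.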
+ perms := os.FileMode(0777)
+ if value, ok := volumeParams[DirectoryPerms]; ok {
+ parsedPerms, err := strconv.ParseUint(value, 8, 32)
+ if err == nil {
+ perms = os.FileMode(parsedPerms)
+ }
+ }
+
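+ // uid and gid default to 0 (root) when not supplied in the storage class parameters.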
+ var uid int
+ if value, ok := volumeParams[Uid]; ok {
+ id, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Uid, err)
+ }
+ if id < 0 {
+ return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than or equal to 0", Uid)
+ }
+ uid = int(id)
+ }
+
+ var gid int
+ if value, ok := volumeParams[Gid]; ok {
+ id, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Gid, err)
+ }
+ if id < 0 {
+ return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than or equal to 0", Gid)
+ }
+ gid = int(id)
+ }
+
+ klog.V(5).Infof("Provisioning directory with permissions %s, uid %d, gid %d", perms, uid, gid)
+
+ provisionedDirectory := path.Join(target, provisionedPath)
+ err = d.osClient.MkDirAllWithPerms(provisionedDirectory, perms, uid, gid)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "Could not provision directory: %v", err)
+ }
+
+ // Check the permissions that actually got created
+ actualPerms, err := d.osClient.GetPerms(provisionedDirectory)
+ if err != nil {
+ klog.V(5).Infof("Could not load file info for '%s'", provisionedDirectory)
+ }
+ klog.V(5).Infof("Permissions of folder '%s' are '%s'", provisionedDirectory, actualPerms)
+
+ err = d.mounter.Unmount(target)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "Could not unmount %q: %v", target, err)
+ }
+ err = d.osClient.Remove(target)
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "Could not delete %q: %v", target, err)
+ }
+
+ volContext := map[string]string{}
+
+ // Enable cross-account DNS resolution or fetch the mount target IP for a cross-account mount
+ if roleArn != "" {
+ if crossAccountDNSEnabled {
+ // This option indicates the customer would like to use DNS to resolve
+ // the cross-account mount target ip address (in order to mount to
+ // the same AZ-ID as the client instance); mounttargetip should
+ // not be used as a mount option in this case.
+ volContext[CrossAccount] = strconv.FormatBool(true)
+ } else {
+ if mountTargetAddress != "" {
+ volContext[MountTargetIp] = mountTargetAddress
+ }
+
+ }
+ }
+
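+ // Directory-mode volume IDs use a single colon (<filesystem-id>:<path>), distinguishing them
+ // from the <filesystem-id>::<access-point-id> format used for access points.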
+ return &csi.Volume{
+ CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
+ VolumeId: fileSystemId + ":" + provisionedPath,
+ VolumeContext: volContext,
+ }, nil
+}
+
+func (d *DirectoryProvisioner) Delete(ctx context.Context, req *csi.DeleteVolumeRequest) (e error) {
+ if !d.deleteProvisionedDir {
+ return nil
+ }
+ fileSystemId, subpath, _, _ := parseVolumeId(req.GetVolumeId())
+ klog.V(5).Infof("Running delete for EFS %s at subpath %s", fileSystemId, subpath)
+
+ localCloud, roleArn, crossAccountDNSEnabled, err := getCloud(req.GetSecrets(), d.cloud, d.adaptiveRetryMode)
+ if err != nil {
+ return err
+ }
+
+ mountOptions, _, err := getMountOptions(ctx, localCloud, fileSystemId, roleArn, crossAccountDNSEnabled, "")
+ if err != nil {
+ return err
+ }
+
+ target := TempMountPathPrefix + "/" + uuid.New().String()
+ klog.V(5).Infof("Making temporary directory at '%s' to temporarily mount EFS folder in", target)
+ if err := d.mounter.MakeDir(target); err != nil {
+ return status.Errorf(codes.Internal, "Could not create dir %q: %v", target, err)
+ }
+
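+ // The deferred cleanup always tries to unmount and then remove the temporary directory,
+ // reporting any failure through the named return value e so it is not silently dropped.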
+ defer func() {
+ // Try and unmount the directory
+ klog.V(5).Infof("Unmounting directory mounted at '%s'", target)
+ unmountErr := d.mounter.Unmount(target)
+ // If that fails then track the error but don't do anything else
+ if unmountErr != nil {
+ klog.V(5).Infof("Unmount failed at '%s'", target)
+ e = status.Errorf(codes.Internal, "Could not unmount %q: %v (prev error: %v)", target, unmountErr, e)
+ } else {
+ // If the unmount succeeded, it's safe to delete the directory as it should now be empty
+ klog.V(5).Infof("Deleting temporary directory at '%s'", target)
+ if err := d.osClient.Remove(target); err != nil && !os.IsNotExist(err) {
+ e = status.Errorf(codes.Internal, "Could not delete %q: %v (prev error: %v)", target, err, e)
+ }
+ }
+ }()
+
+ klog.V(5).Infof("Mounting EFS '%s' into temporary directory at '%s'", fileSystemId, target)
+ if err := d.mounter.Mount(fileSystemId, target, "efs", mountOptions); err != nil {
+ // If the mount fails we are about to return an error anyway, so reporting that failure matters more
+ // than worrying about the folder not being deleted; the removal below is best effort
+ _ = d.osClient.Remove(target)
+ return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", fileSystemId, target, err)
+ }
+
+ pathToRemove := path.Join(target, subpath)
+ klog.V(5).Infof("Delete all files at %s, stored on EFS %s", pathToRemove, fileSystemId)
+ if err := d.osClient.RemoveAll(pathToRemove); err != nil {
+ return status.Errorf(codes.Internal, "Could not delete directory %q: %v", subpath, err)
+ }
+
+ return nil
+}
+
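+// getMountOptions returns the base "tls" and "iam" mount options and, for cross-account mounts
+// (non-empty roleArn), appends either the CrossAccount option or an explicit mount target IP
+// resolved via DescribeMountTargets.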
+func getMountOptions(ctx context.Context, cloud cloud.Cloud, fileSystemId string, roleArn string, crossAccountDNSEnabled bool, azName string) ([]string, string, error) {
+ mountOptions := []string{"tls", "iam"}
+ mountTargetAddress := ""
+ if roleArn != "" {
+ if crossAccountDNSEnabled {
+ // Connect via dns rather than mounttargetip
+ mountOptions = append(mountOptions, CrossAccount)
+ } else {
+ mountTarget, err := cloud.DescribeMountTargets(ctx, fileSystemId, azName)
+ if err == nil {
+ mountTargetAddress = mountTarget.IPAddress
+ mountOptions = append(mountOptions, MountTargetIp+"="+mountTargetAddress)
+ } else {
+ klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", fileSystemId, err)
+ }
+ }
+ }
+ return mountOptions, mountTargetAddress, nil
+}
diff --git a/pkg/driver/provisioner_dir_test.go b/pkg/driver/provisioner_dir_test.go
new file mode 100644
index 000000000..7a56f1e16
--- /dev/null
+++ b/pkg/driver/provisioner_dir_test.go
@@ -0,0 +1,619 @@
+package driver
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "testing"
+
+ "github.com/container-storage-interface/spec/lib/go/csi"
+ "github.com/golang/mock/gomock"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "k8s.io/mount-utils"
+
+ "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud"
+ "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/driver/mocks"
+)
+
+const fileSystemId = "fs-123456789"
+
+func TestDirectoryProvisioner_Provision(t *testing.T) {
+ var (
+ fsId = "fs-abcd1234"
+ volumeName = "volumeName"
+ capacityRange int64 = 5368709120
+ stdVolCap = &csi.VolumeCapability{
+ AccessType: &csi.VolumeCapability_Mount{
+ Mount: &csi.VolumeCapability_MountVolume{},
+ },
+ AccessMode: &csi.VolumeCapability_AccessMode{
+ Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
+ },
+ }
+ )
+ tests := []struct {
+ name string
+ testFunc func(t *testing.T)
+ }{
+ {
+ name: "Success: Check path created is sensible",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
+
+ ctx := context.Background()
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: DirectoryMode,
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ BasePath: "/dynamic",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ osClient: &FakeOsClient{},
+ }
+
+ volume, err := dProv.Provision(ctx, req)
+
+ if err != nil {
+ t.Fatalf("Expected provision call to succeed but failed: %v", err)
+ }
+
+ expectedVolumeId := fmt.Sprintf("%s:/dynamic/%s", fsId, req.Name)
+ if volume.VolumeId != expectedVolumeId {
+ t.Fatalf("Expected volumeId to be %s but was %s", expectedVolumeId, volume.VolumeId)
+ }
+ },
+ },
+ {
+ name: "Fail: Return error for empty fsId",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+
+ ctx := context.Background()
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: DirectoryMode,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ }
+
+ _, err := dProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.InvalidArgument {
+ t.Fatalf("Expected InvalidArgument error but instead got %v", err)
+ }
+ },
+ },
+ {
+ name: "Fail: Mounter cannot create target directory on node",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(
+ io.ErrUnexpectedEOF)
+
+ ctx := context.Background()
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: DirectoryMode,
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ }
+
+ _, err := dProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), io.ErrUnexpectedEOF) {
+ t.Fatalf("Expected mount error but instead got %v", err)
+ }
+ },
+ },
+ {
+ name: "Fail: Mounter cannot mount into target directory",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(
+ mount.NewMountError(mount.HasFilesystemErrors, "Errors"))
+
+ ctx := context.Background()
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: DirectoryMode,
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ }
+
+ _, err := dProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), mount.MountError{}) {
+ t.Fatalf("Expected mount error but instead got %v", err)
+ }
+ },
+ },
+ {
+ name: "Fail: Could not create directory after mounting root",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+
+ ctx := context.Background()
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: DirectoryMode,
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ BasePath: "/dynamic",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ osClient: &BrokenOsClient{},
+ }
+
+ _, err := dProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), &os.PathError{}) {
+ t.Fatalf("Expected path error but instead got %v", err)
+ }
+ },
+ },
+ {
+ name: "Fail: Could not unmount root directory post creation",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(mount.NewMountError(mount.FilesystemMismatch, "Error"))
+
+ ctx := context.Background()
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: DirectoryMode,
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ BasePath: "/dynamic",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ osClient: &FakeOsClient{},
+ }
+
+ _, err := dProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), mount.MountError{}) {
+ t.Fatalf("Expected mount error but instead got %v", err)
+ }
+ },
+ },
+ {
+ name: "Fail: Could not delete target directory once unmounted",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+
+ ctx := context.Background()
+
+ req := &csi.CreateVolumeRequest{
+ Name: volumeName,
+ VolumeCapabilities: []*csi.VolumeCapability{
+ stdVolCap,
+ },
+ CapacityRange: &csi.CapacityRange{
+ RequiredBytes: capacityRange,
+ },
+ Parameters: map[string]string{
+ ProvisioningMode: DirectoryMode,
+ FsId: fsId,
+ GidMin: "1000",
+ GidMax: "2000",
+ DirectoryPerms: "777",
+ BasePath: "/dynamic",
+ Uid: "1000",
+ Gid: "1000",
+ },
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ osClient: &BrokenOsClient{},
+ }
+
+ _, err := dProv.Provision(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), &os.PathError{}) {
+ t.Fatalf("Expected mount error but instead got %v", err)
+ }
+ },
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.name, test.testFunc)
+ }
+}
+
+func TestDirectoryProvisioner_Delete(t *testing.T) {
+ var (
+ fsId = "fs-abcd1234"
+ volumeId = fmt.Sprintf("%s:%s", fsId, "/dynamic/newDir")
+ )
+
+ tests := []struct {
+ name string
+ testFunc func(t *testing.T)
+ }{
+ {
+ name: "Success: If retain directory is set nothing happens",
+ testFunc: func(t *testing.T) {
+ ctx := context.Background()
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ dProv := DirectoryProvisioner{
+ deleteProvisionedDir: false,
+ }
+
+ err := dProv.Delete(ctx, req)
+
+ if err != nil {
+ t.Fatalf("Expected success but found %v", err)
+ }
+ },
+ },
+ {
+ name: "Success: If not retaining directory folder and contents are deleted",
+ testFunc: func(t *testing.T) {
+ ctx := context.Background()
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ deleteProvisionedDir: true,
+ osClient: &FakeOsClient{},
+ }
+
+ err := dProv.Delete(ctx, req)
+
+ if err != nil {
+ t.Fatalf("Expected success but found %v", err)
+ }
+ },
+ },
+ {
+ name: "Fail: Mounter cannot create target directory on node",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(
+ io.ErrUnexpectedEOF)
+
+ ctx := context.Background()
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ deleteProvisionedDir: true,
+ }
+
+ err := dProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), io.ErrUnexpectedEOF) {
+ t.Fatalf("Expected mount error but instead got %v", err)
+ }
+ },
+ },
+ {
+ name: "Fail: Cannot delete contents of provisioned directory",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
+
+ ctx := context.Background()
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ deleteProvisionedDir: true,
+ osClient: &BrokenOsClient{},
+ }
+
+ err := dProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), &os.PathError{}) {
+ t.Fatalf("Expected path error but instead got %v", err)
+ }
+ },
+ },
+ {
+ name: "Fail: Cannot unmount directory after contents have been deleted",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(mount.NewMountError(mount.HasFilesystemErrors, "Errors"))
+
+ ctx := context.Background()
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ deleteProvisionedDir: true,
+ osClient: &FakeOsClient{},
+ }
+
+ err := dProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), mount.MountError{}) {
+ t.Fatalf("Expected mount error but instead got %v", err)
+ }
+ },
+ },
+ {
+ name: "Fail: Cannot delete temporary directory after unmount",
+ testFunc: func(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockMounter := mocks.NewMockMounter(mockCtl)
+ mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+ mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
+
+ ctx := context.Background()
+
+ req := &csi.DeleteVolumeRequest{
+ VolumeId: volumeId,
+ }
+
+ dProv := DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ mounter: mockMounter,
+ },
+ deleteProvisionedDir: true,
+ osClient: &BrokenOsClient{},
+ }
+
+ err := dProv.Delete(ctx, req)
+
+ if err == nil {
+ t.Fatal("Expected error but found none")
+ }
+ if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), &os.PathError{}) {
+ t.Fatalf("Expected path error but instead got %v", err)
+ }
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, test.testFunc)
+ }
+}
+
+func TestDirectoryProvisioner_GetMountOptions_NoRoleArnGivesStandardOptions(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ ctx := context.Background()
+ expectedOptions := []string{"tls", "iam"}
+
+ options, _, _ := getMountOptions(ctx, mockCloud, fileSystemId, "", false, "")
+
+ if !reflect.DeepEqual(options, expectedOptions) {
+ t.Fatalf("Expected returned options to be %v but was %v", expectedOptions, options)
+ }
+}
+
+func TestDirectoryProvisioner_GetMountOptions_RoleArnAddsMountTargetIp(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ fakeMountTarget := cloud.MountTarget{
+ AZName: "foo",
+ AZId: "",
+ MountTargetId: "",
+ IPAddress: "8.8.8.8",
+ }
+ ctx := context.Background()
+ mockCloud.EXPECT().DescribeMountTargets(ctx, fileSystemId, "foo").Return(&fakeMountTarget, nil)
+
+ expectedOptions := []string{"tls", "iam", MountTargetIp + "=" + fakeMountTarget.IPAddress}
+
+ options, mountTargetAddress, _ := getMountOptions(ctx, mockCloud, fileSystemId, "roleArn", false, "foo")
+
+ if !reflect.DeepEqual(options, expectedOptions) {
+ t.Fatalf("Expected returned options to be %v but was %v", expectedOptions, options)
+ }
+
+ if mountTargetAddress != fakeMountTarget.IPAddress {
+ t.Fatalf("Expected returned mountTargetAddress to be %v but was %v", fakeMountTarget.IPAddress, mountTargetAddress)
+ }
+}
+
+func TestDirectoryProvisioner_GetMountOptions_RoleArnCross(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+ ctx := context.Background()
+
+ expectedOptions := []string{"tls", "iam", CrossAccount}
+
+ options, _, _ := getMountOptions(ctx, mockCloud, fileSystemId, "roleArn", true, "")
+
+ if !reflect.DeepEqual(options, expectedOptions) {
+ t.Fatalf("Expected returned options to be %v but was %v", expectedOptions, options)
+ }
+}
diff --git a/pkg/driver/provisioner_test.go b/pkg/driver/provisioner_test.go
new file mode 100644
index 000000000..21f513078
--- /dev/null
+++ b/pkg/driver/provisioner_test.go
@@ -0,0 +1,41 @@
+package driver
+
+import (
+ "testing"
+
+ "github.com/golang/mock/gomock"
+
+ "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/driver/mocks"
+)
+
+func TestProvisioner_GetCloud_NoRoleArnGivesOriginalObjectBack(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+
+ actualCloud, _, _, err := getCloud(map[string]string{}, mockCloud, false)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if actualCloud != mockCloud {
+ t.Fatalf("Expected cloud object to be %v but was %v", mockCloud, actualCloud)
+ }
+
+ mockCtl.Finish()
+}
+
+func TestProvisioner_GetCloud_WithRoleArnGivesNewObject(t *testing.T) {
+ mockCtl := gomock.NewController(t)
+ mockCloud := mocks.NewMockCloud(mockCtl)
+
+ actualCloud, _, _, err := getCloud(map[string]string{
+ RoleArn: "arn:aws:iam::1234567890:role/EFSCrossAccountRole",
+ }, mockCloud, false)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ if actualCloud == mockCloud {
+ t.Fatalf("Unexpected cloud object: %v", actualCloud)
+ }
+
+ mockCtl.Finish()
+}
diff --git a/pkg/driver/sanity_test.go b/pkg/driver/sanity_test.go
index 1f748135b..11a41af69 100644
--- a/pkg/driver/sanity_test.go
+++ b/pkg/driver/sanity_test.go
@@ -70,17 +70,17 @@ func TestSanityEFSCSI(t *testing.T) {
mockCtrl := gomock.NewController(t)
mockCloud := cloud.NewFakeCloudProvider()
+ mounter := NewFakeMounter()
drv := Driver{
endpoint: endpoint,
nodeID: "sanity",
- mounter: NewFakeMounter(),
+ mounter: mounter,
efsWatchdog: &mockWatchdog{},
cloud: mockCloud,
nodeCaps: nodeCaps,
volMetricsOptIn: true,
volStatter: NewVolStatter(),
- gidAllocator: NewGidAllocator(),
- lockManager: NewLockManagerMap(),
+ provisioners: getProvisioners(mockCloud, mounter, nil, false, false, &FakeOsClient{}, false),
}
defer func() {
if r := recover(); r != nil {
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 2c5d8912b..1419c942a 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -3,12 +3,15 @@ package e2e
import (
"context"
"fmt"
+ e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+ e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"os"
"strconv"
"strings"
"time"
ginkgo "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/kubernetes/test/e2e/framework/kubectl"
@@ -361,11 +364,11 @@ var _ = ginkgo.Describe("[efs-csi] EFS CSI", func() {
testEncryptInTransit(f, &encryptInTransit)
})
- ginkgo.It("should successfully perform dynamic provisioning", func() {
+ testPerformDynamicProvisioning := func(mode string) {
ginkgo.By("Creating EFS Storage Class, PVC and associated PV")
params := map[string]string{
- "provisioningMode": "efs-ap",
+ "provisioningMode": mode,
"fileSystemId": FileSystemId,
"subPathPattern": "${.PVC.name}",
"directoryPerms": "700",
@@ -414,6 +417,100 @@ var _ = ginkgo.Describe("[efs-csi] EFS CSI", func() {
if output != testData {
ginkgo.Fail("Read data does not match write data.")
}
+ }
+
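+ // Run the same dynamic provisioning test body for both access-point (efs-ap) and directory (efs-dir) provisioning modes.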
+ for _, mode := range []string{"efs-ap", "efs-dir"} {
+ testName := fmt.Sprintf("should successfully perform dynamic provisioning in %s mode", mode)
+ ginkgo.It(testName, func() {
+ testPerformDynamicProvisioning(mode)
+ })
+ }
+
+ createProvisionedDirectory := func(f *framework.Framework, basePath string, pvcName string) (*v1.PersistentVolumeClaim, *storagev1.StorageClass) {
+ immediateBinding := storagev1.VolumeBindingImmediate
+ sc := storageframework.GetStorageClass("efs.csi.aws.com", map[string]string{
+ "provisioningMode": "efs-dir",
+ "fileSystemId": FileSystemId,
+ "basePath": basePath,
+ }, &immediateBinding, f.Namespace.Name)
+ sc, err := f.ClientSet.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
+ framework.Logf("Created StorageClass %s", sc.Name)
+ framework.ExpectNoError(err, "creating dynamic provisioning storage class")
+ pvc := makeEFSPVC(f.Namespace.Name, pvcName, sc.Name)
+ pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc, metav1.CreateOptions{})
+ err = e2epv.WaitForPersistentVolumeClaimPhase(context.TODO(), v1.ClaimBound, f.ClientSet, f.Namespace.Name, pvc.Name,
+ time.Second*5, time.Minute)
+ framework.ExpectNoError(err, "waiting for pv to be provisioned and bound")
+ pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
+
+ framework.Logf("Created PVC %s, bound to PV %s by dynamic provisioning", pvc.Name, pvc.Spec.VolumeName)
+ return pvc, sc
+ }
+
+ ginkgo.It("should create a directory with the correct permissions when in directory provisioning mode", func() {
+ basePath := "dynamic_provisioning"
+ dynamicPvc, sc := createProvisionedDirectory(f, basePath, "directory-pvc-1")
+ defer func() {
+ err := f.ClientSet.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
+ framework.ExpectNoError(err, "removing provisioned StorageClass")
+ framework.Logf("Deleted StorageClass %s", sc.Name)
+ }()
+
+ pvc, pv, err := createEFSPVCPV(f.ClientSet, f.Namespace.Name, "root-dir-pvc-create", "/", map[string]string{})
+ defer func() {
+ _ = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
+ _ = f.ClientSet.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{})
+ }()
+ framework.ExpectNoError(err, "creating root mounted pv, pvc to check")
+
+ podSpec := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelBaseline, "")
+ podSpec.Spec.RestartPolicy = v1.RestartPolicyNever
+ pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podSpec, metav1.CreateOptions{})
+ framework.ExpectNoError(err, "creating pod")
+ err = e2epod.WaitForPodRunningInNamespace(context.TODO(), f.ClientSet, pod)
+ framework.ExpectNoError(err, "waiting for pod to start running")
+
+ provisionedPath := fmt.Sprintf("/mnt/volume1/%s/%s", basePath, dynamicPvc.Spec.VolumeName)
+
+ perms, _, err := e2evolume.PodExec(f, pod, "stat -c \"%a\" "+provisionedPath)
+ framework.ExpectNoError(err, "ran stat command in /mnt/volume1 to get folder permissions")
+ framework.Logf("Perms Output: %s", perms)
+ gomega.Expect(perms).To(gomega.Equal(fmt.Sprintf("%d", 777)), "Checking File Permissions of mounted folder")
+
+ _ = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
+ })
+
+ ginkgo.It("should delete a directory provisioned in directory provisioning mode", func() {
+ basePath := "dynamic_provisioning_delete"
+ pvc, sc := createProvisionedDirectory(f, basePath, "directory-pvc-2")
+ defer func() {
+ err := f.ClientSet.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
+ framework.ExpectNoError(err, "removing provisioned StorageClass")
+ framework.Logf("Deleted StorageClass %s", sc.Name)
+ }()
+ volumeName := pvc.Spec.VolumeName
+
+ err := f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name,
+ metav1.DeleteOptions{})
+ framework.ExpectNoError(err, "deleting pvc")
+
+ pvc, pv, err := createEFSPVCPV(f.ClientSet, f.Namespace.Name, "root-dir-pvc-delete", "/", map[string]string{})
+ defer func() {
+ _ = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
+ _ = f.ClientSet.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{})
+ }()
+ framework.ExpectNoError(err, "creating root mounted pv, pvc to check")
+
+ podSpec := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelBaseline, "")
+ podSpec.Spec.RestartPolicy = v1.RestartPolicyNever
+ pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podSpec, metav1.CreateOptions{})
+ framework.ExpectNoError(err, "creating pod")
+ err = e2epod.WaitForPodRunningInNamespace(context.TODO(), f.ClientSet, pod)
+ framework.ExpectNoError(err, "waiting for pod to start running")
+
+ e2evolume.VerifyExecInPodFail(f, pod, "test -d "+"/mnt/volume1/"+basePath+"/"+volumeName, 1)
+
+ _ = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
})
})
@@ -476,7 +573,7 @@ func createEFSPVCPV(c clientset.Interface, namespace, name, path string, volumeA
}
func makeEFSPVCPV(namespace, name, path string, volumeAttributes map[string]string) (*v1.PersistentVolumeClaim, *v1.PersistentVolume) {
- pvc := makeEFSPVC(namespace, name)
+ pvc := makeEFSPVC(namespace, name, "")
pv := makeEFSPV(name, path, volumeAttributes)
pvc.Spec.VolumeName = pv.Name
pv.Spec.ClaimRef = &v1.ObjectReference{
@@ -486,8 +583,7 @@ func makeEFSPVCPV(namespace, name, path string, volumeAttributes map[string]stri
return pvc, pv
}
-func makeEFSPVC(namespace, name string) *v1.PersistentVolumeClaim {
- storageClassName := ""
+func makeEFSPVC(namespace, name string, storageClassName string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,