-
Notifications
You must be signed in to change notification settings - Fork 1k
Labels
Description
Running terraform apply on a Tekton task in an EKS 1.30 cluster using Terraform 1.5.7
Terraform Version, Provider Version and Kubernetes Version
Terraform version: 1.5.7
Kubernetes provider version: v2.38.0_x5
Kubernetes version: 1.30 EKS
Affected Resource(s)
Creation of a Tekton Task used to validate the existence of an S3 bucket in AWS
Terraform Configuration Files
# validation.tf -
# Tekton Pipeline manifest (tekton.dev/v1beta1). Running `terraform plan` on
# this resource reproduces the provider panic shown below — the stack trace
# points at morph.DeepUnknown in the kubernetes_manifest type machinery, not
# at anything semantically wrong with this HCL.
resource "kubernetes_manifest" "validation_pipeline" {
# Deploy only when Tekton support is enabled for this environment.
count = var.deploy_tekton ? 1 : 0
manifest = {
apiVersion = "tekton.dev/v1beta1"
kind = "Pipeline"
metadata = {
name = "validation-pipeline"
# NOTE(review): assumes the Tekton operator namespace already exists — confirm.
namespace = "tekton-operator"
}
spec = {
description = "Pipeline for setting validation environments with S3 backend preparation"
# Pipeline-level parameters, forwarded to the task below.
params = [
{
name = "cluster-name"
type = "string"
description = "Name of the validation cluster to create"
}
]
tasks = [
{
name = "s3-prepare"
# References a Task defined outside this file; see also the TaskRun
# resource that uses the same taskRef name.
taskRef = {
name = "validation-s3-prepare"
}
# NOTE(review): the panic message shows the provider inferring
# tftypes.DynamicPseudoType for Tekton param "value" fields in one
# place and tftypes.String in another — this interpolated string
# value appears to be where the type mismatch originates; verify
# against the provider's OpenAPI-to-tftypes translation.
params = [
{
name = "cluster-name"
value = "$(params.cluster-name)"
}
]
}
]
}
}
}
# validation-s3-taskrun.tf - line 4
# Tekton TaskRun manifest (tekton.dev/v1beta1) invoking the same
# "validation-s3-prepare" Task referenced by the Pipeline resource above.
resource "kubernetes_manifest" "validation_s3_prepare_taskrun_template" {
# Deploy only when Tekton support is enabled for this environment.
count = var.deploy_tekton ? 1 : 0
manifest = {
apiVersion = "tekton.dev/v1beta1"
kind = "TaskRun"
metadata = {
name = "validation-s3-prepare-template"
namespace = "tekton-operator"
}
spec = {
# IRSA-enabled service account managed elsewhere in this configuration;
# indexed [0] because that resource is also gated by a count.
serviceAccountName = kubernetes_service_account_v1.tekton_terraform[0].metadata[0].name
taskRef = {
name = "validation-s3-prepare"
}
# Here the param value is a plain literal (vs. the "$(params...)"
# interpolation in the Pipeline) — relevant to the String vs.
# DynamicPseudoType mismatch in the panic output.
params = [
{
name = "cluster-name"
value = "example-validation-cluster"
}
]
}
}
}
Debug Output
Stack trace shows panic in terraform-provider-kubernetes with ElementKeyInt(0) type mismatch error in tftypes handling.
Panic Output
Stack trace from the terraform-provider-kubernetes_v2.38.0_x5 plugin:
panic: ElementKeyInt(0): can't use tftypes.Object["description":tftypes.String, "displayName":tftypes.String,
"matrix":tftypes.Object["include":tftypes.Tuple[tftypes.Object["name":tftypes.String,
"params":tftypes.Tuple[tftypes.Object["name":tftypes.String, "value":tftypes.DynamicPseudoType]]]],
"params":tftypes.Tuple[tftypes.Object["name":tftypes.String, "value":tftypes.DynamicPseudoType]]], "name":tftypes.String,
"onError":tftypes.String, "params":tftypes.Tuple[tftypes.Object["name":tftypes.String, "value":tftypes.String],
tftypes.Object["name":tftypes.String, "value":tftypes.String], tftypes.Object["name":tftypes.String, "value":tftypes.String]],
"pipelineRef":tftypes.Object["apiVersion":tftypes.String, "bundle":tftypes.String, "name":tftypes.String,
"params":tftypes.Tuple[tftypes.Object["name":tftypes.String, "value":tftypes.DynamicPseudoType]], "resolver":tftypes.String],
"pipelineSpec":tftypes.DynamicPseudoType,
"resources":tftypes.Object["inputs":tftypes.List[tftypes.Object["from":tftypes.List[tftypes.String], "name":tftypes.String,
"resource":tftypes.String]], "outputs":tftypes.List[tftypes.Object["name":tftypes.String, "resource":tftypes.String]]],
"retries":tftypes.Number, "runAfter":tftypes.List[tftypes.String], "taskRef":tftypes.Object["apiVersion":tftypes.String,
"bundle":tftypes.String, "kind":tftypes.String, "name":tftypes.String, "params":tftypes.Tuple[tftypes.Object["name":tftypes.String,
"value":tftypes.DynamicPseudoType]], "resolver":tftypes.String], "taskSpec":tftypes.DynamicPseudoType, "timeout":tftypes.String,
"when":tftypes.List[tftypes.Object["cel":tftypes.String, "input":tftypes.String, "operator":tftypes.String,
"values":tftypes.List[tftypes.String]]], "workspaces":tftypes.List[tftypes.Object["name":tftypes.String, "subPath":tftypes.String,
"workspace":tftypes.String]]] as tftypes.Object["description":tftypes.String, "displayName":tftypes.String,
"matrix":tftypes.Object["include":tftypes.Tuple[tftypes.Object["name":tftypes.String,
"params":tftypes.Tuple[tftypes.Object["name":tftypes.String, "value":tftypes.DynamicPseudoType]]]],
"params":tftypes.Tuple[tftypes.Object["name":tftypes.String, "value":tftypes.DynamicPseudoType]]], "name":tftypes.String,
"onError":tftypes.String, "params":tftypes.Tuple[tftypes.Object["name":tftypes.String, "value":tftypes.DynamicPseudoType]],
"pipelineRef":tftypes.Object["apiVersion":tftypes.String, "bundle":tftypes.String, "name":tftypes.String,
"params":tftypes.Tuple[tftypes.Object["name":tftypes.String, "value":tftypes.DynamicPseudoType]], "resolver":tftypes.String],
"pipelineSpec":tftypes.DynamicPseudoType,
"resources":tftypes.Object["inputs":tftypes.List[tftypes.Object["from":tftypes.List[tftypes.String], "name":tftypes.String,
"resource":tftypes.String]], "outputs":tftypes.List[tftypes.Object["name":tftypes.String, "resource":tftypes.String]]],
"retries":tftypes.Number, "runAfter":tftypes.List[tftypes.String], "taskRef":tftypes.Object["apiVersion":tftypes.String,
"bundle":tftypes.String, "kind":tftypes.String, "name":tftypes.String, "params":tftypes.Tuple[tftypes.Object["name":tftypes.String,
"value":tftypes.DynamicPseudoType]], "resolver":tftypes.String], "taskSpec":tftypes.DynamicPseudoType, "timeout":tftypes.String,
"when":tftypes.List[tftypes.Object["cel":tftypes.String, "input":tftypes.String, "operator":tftypes.String,
"values":tftypes.List[tftypes.String]]], "workspaces":tftypes.List[tftypes.Object["name":tftypes.String, "subPath":tftypes.String,
"workspace":tftypes.String]]]
goroutine 269 [running]:
github.com/hashicorp/terraform-plugin-go/tftypes.NewValue(...)
github.com/hashicorp/[email protected]/tftypes/value.go:278
github.com/hashicorp/terraform-provider-kubernetes/manifest/morph.DeepUnknown({0x1050cb098, 0x1400250e0c0}, {{0x1050cb098?, 0x14002dca030?},
{0x104c1f840?, 0x14002b6a8a0?}}, 0x14002b6ad38)
github.com/hashicorp/terraform-provider-kubernetes/manifest/morph/scaffold.go:86 +0x15e8
github.com/hashicorp/terraform-provider-kubernetes/manifest/morph.DeepUnknown({0x1050caa40, 0x14002c9cb40}, {{0x1050caa40?, 0x14002dcaf90?},
{0x104dce7e0?, 0x14002dc20f0?}}, 0x14002b6a900)
github.com/hashicorp/terraform-provider-kubernetes/manifest/morph/scaffold.go:33 +0x18cc
github.com/hashicorp/terraform-provider-kubernetes/manifest/morph.DeepUnknown({0x1050caa40, 0x14002dbeea0}, {{0x1050caa40?, 0x14002de8120?},
{0x104dce7e0?, 0x14002dbf110?}}, 0x14002b6a8d0)
github.com/hashicorp/terraform-provider-kubernetes/manifest/morph/scaffold.go:33 +0x18cc
github.com/hashicorp/terraform-provider-kubernetes/manifest/provider.(*RawProviderServer).PlanResourceChange(0x14000b16ea0, {0x1050c1968,
0x140035c3530}, 0x14001071aa0)
github.com/hashicorp/terraform-provider-kubernetes/manifest/provider/plan.go:395 +0x2d78
github.com/hashicorp/terraform-plugin-mux/tf5muxserver.(*muxServer).PlanResourceChange(0x1400059e580, {0x1050c1968?, 0x140035c3260?},
0x14001071aa0)
github.com/hashicorp/[email protected]/tf5muxserver/mux_server_PlanResourceChange.go:73 +0x244
github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server.(*server).PlanResourceChange(0x14000252780, {0x1050c1968?, 0x140035c2a20?},
0x1400106b100)
github.com/hashicorp/[email protected]/tfprotov5/tf5server/server.go:949 +0x2a8
github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5._Provider_PlanResourceChange_Handler({0x1050281e0, 0x14000252780},
{0x1050c1968, 0x140035c2a20}, 0x1400106b080, 0x0)
github.com/hashicorp/[email protected]/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go:669 +0x1c0
google.golang.org/grpc.(*Server).processUnaryRPC(0x14000ef4000, {0x1050c1968, 0x140035c2990}, 0x140010718c0, 0x14000ef65a0, 0x106228430,
0x0)
google.golang.org/[email protected]/server.go:1405 +0xc9c
google.golang.org/grpc.(*Server).handleStream(0x14000ef4000, {0x1050c1fb0, 0x14000bd2d00}, 0x140010718c0)
google.golang.org/[email protected]/server.go:1815 +0x900
google.golang.org/grpc.(*Server).serveStreams.func2.1()
google.golang.org/[email protected]/server.go:1035 +0x84
created by google.golang.org/grpc.(*Server).serveStreams.func2 in goroutine 52
google.golang.org/[email protected]/server.go:1046 +0x138
Error: The terraform-provider-kubernetes_v2.38.0_x5 plugin crashed!
This is always indicative of a bug within the plugin. It would be immensely
helpful if you could report the crash with the plugin's maintainers so that it
can be fixed. The output above should help diagnose the issue.
Steps to Reproduce
Run terraform plan on the provided HCL configuration.
Expected Behavior
Terraform should successfully plan the Tekton Pipeline and TaskRun kubernetes_manifest resources without crashing.
Actual Behavior
The terraform-provider-kubernetes plugin crashes with a panic
Important Factoids
- Using Tekton v1beta1 API
- EKS cluster with IRSA service account references in TaskRun specs
References
IkeHunter